hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
f330057a5888a80f89b70f62fe4b61bb69f06565.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. 
// Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
//becomes the (j,i) in the image const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; if ( thread_2D_pos.y >= numRows || thread_2D_pos.x >= numCols ) return; int current_row = thread_2D_pos.y; // center int current_col = thread_2D_pos.x; //const int filterCenter = int(filterWidth / 2.0f); float result = 0.f; // for (int i=0;i<filterWidth;i++) { // int f_i = i - filterCenter; // filter_i // int i_i = c_i + f_i; // image_i // int pi_i = min(numRows-1, max(0, i_i)); // paddedImage_i // for (int j=0;j<filterWidth;j++) { // int f_j = j - filterCenter; // int i_j = c_j + f_j; // int pi_j = min(numCols-1, max(0, i_j)); // float weight = filter[i*filterWidth + j]; // acc = acc + weight * inputChannel[pi_i*numCols + pi_j]; // } // } // __syncthreads(); // outputChannel[c_i*numCols + c_j] = (int)acc; for (int filter_row = -filterWidth/2; filter_row <= filterWidth/2; ++filter_row) { for (int filter_col = -filterWidth/2; filter_col <= filterWidth/2; ++filter_col) { // int image_row = min(max(current_row + filter_row, 0), static_cast<int>(numRows - 1)); // int image_col = min(max(current_col + filter_col, 0), static_cast<int>(numCols - 1)); int image_row = min(max(current_row + filter_row, 0), (numRows - 1)); int image_col = min(max(current_col + filter_col, 0), (numCols - 1)); float image_value = static_cast<float>(inputChannel[image_row * numCols + image_col]); float filter_value = filter[(filter_row + filterWidth/2) * filterWidth + filter_col + filterWidth/2]; result += image_value * filter_value; } } outputChannel[thread_1D_pos] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned 
char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int idx = x + y * numCols; // Important! if ( x >= numCols || y >= numRows ) { return; } uchar4 rgba = inputImageRGBA[idx]; redChannel[idx] = rgba.x ; greenChannel[idx] = rgba.y ; blueChannel[idx] = rgba.z ; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * 
numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //device GPU //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); // dst destination //Remember to use checkCudaErrors! // src source checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(16, 16, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize( (numCols+15)/16, (numRows+15)/16, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red,d_redBlurred,numRows,numCols,d_filter,filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green,d_greenBlurred,numRows,numCols,d_filter,filterWidth); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue,d_blueBlurred,numRows,numCols,d_filter,filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); checkCudaErrors(hipFree(d_filter));//TODO } // Good job! Your image matched perfectly to the reference image //Your program ran and executed in 1.627488 ms.
f330057a5888a80f89b70f62fe4b61bb69f06565.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. 
// // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. 
If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. 
//becomes the (j,i) in the image const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; if ( thread_2D_pos.y >= numRows || thread_2D_pos.x >= numCols ) return; int current_row = thread_2D_pos.y; // center int current_col = thread_2D_pos.x; //const int filterCenter = int(filterWidth / 2.0f); float result = 0.f; // for (int i=0;i<filterWidth;i++) { // int f_i = i - filterCenter; // filter_i // int i_i = c_i + f_i; // image_i // int pi_i = min(numRows-1, max(0, i_i)); // paddedImage_i // for (int j=0;j<filterWidth;j++) { // int f_j = j - filterCenter; // int i_j = c_j + f_j; // int pi_j = min(numCols-1, max(0, i_j)); // float weight = filter[i*filterWidth + j]; // acc = acc + weight * inputChannel[pi_i*numCols + pi_j]; // } // } // __syncthreads(); // outputChannel[c_i*numCols + c_j] = (int)acc; for (int filter_row = -filterWidth/2; filter_row <= filterWidth/2; ++filter_row) { for (int filter_col = -filterWidth/2; filter_col <= filterWidth/2; ++filter_col) { // int image_row = min(max(current_row + filter_row, 0), static_cast<int>(numRows - 1)); // int image_col = min(max(current_col + filter_col, 0), static_cast<int>(numCols - 1)); int image_row = min(max(current_row + filter_row, 0), (numRows - 1)); int image_col = min(max(current_col + filter_col, 0), (numCols - 1)); float image_value = static_cast<float>(inputChannel[image_row * numCols + image_col]); float filter_value = filter[(filter_row + filterWidth/2) * filterWidth + filter_col + filterWidth/2]; result += image_value * filter_value; } } outputChannel[thread_1D_pos] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned 
char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int idx = x + y * numCols; // Important! if ( x >= numCols || y >= numRows ) { return; } uchar4 rgba = inputImageRGBA[idx]; redChannel[idx] = rgba.x ; greenChannel[idx] = rgba.y ; blueChannel[idx] = rgba.z ; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * 
numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //device GPU //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); // dst destination //Remember to use checkCudaErrors! // src source checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(16, 16, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. const dim3 gridSize( (numCols+15)/16, (numRows+15)/16, 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. 
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize, blockSize>>>(d_red,d_redBlurred,numRows,numCols,d_filter,filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_green,d_greenBlurred,numRows,numCols,d_filter,filterWidth); gaussian_blur<<<gridSize, blockSize>>>(d_blue,d_blueBlurred,numRows,numCols,d_filter,filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter));//TODO } // Good job! Your image matched perfectly to the reference image //Your program ran and executed in 1.627488 ms.
7b506284799f6e0847ef294718259808529dd52a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include <stdio.h> __global__ void cube(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f * f ; d_out[0] = 42; } template<std::size_t NN> void test_alloc(){ float* data_; hipMalloc((void**) &data_, sizeof(float)*NN); } int main(int argc, char ** argv) { const int ARRAY_SIZE = 96; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); test_alloc<3>(); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory hipMalloc((void**) &d_in, ARRAY_BYTES); hipMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); // launch the kernel hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in); // printf("Test print device data: \n"); // printf("%f", d_out[3]); // copy back the result array to the CPU hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } hipFree(d_in); hipFree(d_out); return 0; }
7b506284799f6e0847ef294718259808529dd52a.cu
#pragma once #include <stdio.h> __global__ void cube(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx] = f * f * f ; d_out[0] = 42; } template<std::size_t NN> void test_alloc(){ float* data_; cudaMalloc((void**) &data_, sizeof(float)*NN); } int main(int argc, char ** argv) { const int ARRAY_SIZE = 96; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); test_alloc<3>(); // generate the input array on the host float h_in[ARRAY_SIZE]; for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } float h_out[ARRAY_SIZE]; // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory cudaMalloc((void**) &d_in, ARRAY_BYTES); cudaMalloc((void**) &d_out, ARRAY_BYTES); // transfer the array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); // launch the kernel cube<<<1, ARRAY_SIZE>>>(d_out, d_in); // printf("Test print device data: \n"); // printf("%f", d_out[3]); // copy back the result array to the CPU cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } cudaFree(d_in); cudaFree(d_out); return 0; }
26fd238871ffd413ac491dd0e7b51df36455d573.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/ExpandUtils.h> #include <ATen/InitialTensorOptions.h> #include <ATen/NativeFunctions.h> #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseCsrTensorUtils.h> #include <ATen/SparseTensorUtils.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/Resize.h> #include <algorithm> #include <hip/hip_runtime.h> #include <type_traits> #include <THH/THHTensorMathPointwise.cuh> #include <THH/THHThrustAllocator.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPUtils.h> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include <ATen/native/sparse/hip/SparseHIPBlas.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/sequence.h> namespace at { namespace native { using namespace at::sparse_csr; // certain utiliy functions are usable from sparse COO. using namespace at::sparse; Tensor& add_out_dense_sparse_csr_cuda( Tensor& output, const Tensor& dense, const SparseCsrTensor& src, const Scalar& alpha) { TORCH_INTERNAL_ASSERT(dense.layout() == kStrided); TORCH_INTERNAL_ASSERT(src.is_sparse_csr()); TORCH_INTERNAL_ASSERT(dense.is_cuda()); TORCH_CHECK( output.is_contiguous(), "out argument must be contiguous, but got: ", output.suggest_memory_format()); TORCH_CHECK( output.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", output.device()); TORCH_CHECK( src.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got tensor on device: ", src.device()); TORCH_CHECK( dense.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", src.sizes(), " (FYI: op2-sparse addition does not currently support broadcasting)"); auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type()); TORCH_CHECK( canCast(commonDtype, 
output.scalar_type()), "Can't convert result type ", commonDtype, " to output ", output.scalar_type(), " in add operation"); Tensor src_values = src.values(); Tensor src_crow_indices = src.crow_indices(); Tensor src_col_indices = src.col_indices(); resize_output(output, dense.sizes()); Tensor resultBuffer = output; Tensor valuesBuffer = src_values.to(commonDtype); if (output.scalar_type() != commonDtype) { resultBuffer = dense.to(commonDtype); } else if (!is_same_tensor(output, dense)) { resultBuffer.copy_(dense); } AT_DISPATCH_ALL_TYPES( commonDtype, "add_out_op2_sparse_csr", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { AT_DISPATCH_INDEX_TYPES( src_crow_indices.scalar_type(), "csr_add_out_crow_indices", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>(); scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>(); scalar_t cast_value = alpha.to<scalar_t>(); index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>(); index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>(); int64_t out_storage_offset = resultBuffer.storage_offset(); auto out_strides = resultBuffer.strides(); int64_t out_strides0 = out_strides[0]; int64_t out_strides1 = out_strides[1]; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows. 
thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(src_crow_indices.size(0) - 1)), [values_accessor, crow_indices_accessor, col_indices_accessor, out_ptr, out_storage_offset, out_strides0, cast_value, out_strides1 ]__device__(int64_t irow) { index_t start_index = crow_indices_accessor[irow]; index_t end_index = crow_indices_accessor[irow + 1]; for (index_t i = start_index; i < end_index; ++i) { auto icol = col_indices_accessor[i]; auto index = out_storage_offset + irow * out_strides0 + icol * out_strides1; out_ptr[index] += cast_value * values_accessor[i]; } }); }); }); if (output.scalar_type() != commonDtype) { output.copy_(resultBuffer); } return output; } Tensor& add_out_sparse_csr_cuda( const Tensor& self, const SparseCsrTensor& other, const Scalar& alpha, SparseCsrTensor& out) { if (self.layout() == kStrided) { return add_out_dense_sparse_csr_cuda(out, self, other, alpha); } else { TORCH_CHECK( false, "NotImplementedError: Addition of sparse CSR tensors is not yet implemented.") } return out; } } // namespace native } // namespace at
26fd238871ffd413ac491dd0e7b51df36455d573.cu
#include <ATen/ATen.h> #include <ATen/ExpandUtils.h> #include <ATen/InitialTensorOptions.h> #include <ATen/NativeFunctions.h> #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseCsrTensorUtils.h> #include <ATen/SparseTensorUtils.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/Resize.h> #include <algorithm> #include <cuda_runtime.h> #include <type_traits> #include <THC/THCTensorMathPointwise.cuh> #include <THC/THCThrustAllocator.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAUtils.h> #include <c10/cuda/CUDACachingAllocator.h> #include <ATen/native/sparse/cuda/SparseCUDABlas.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/sequence.h> namespace at { namespace native { using namespace at::sparse_csr; // certain utiliy functions are usable from sparse COO. using namespace at::sparse; Tensor& add_out_dense_sparse_csr_cuda( Tensor& output, const Tensor& dense, const SparseCsrTensor& src, const Scalar& alpha) { TORCH_INTERNAL_ASSERT(dense.layout() == kStrided); TORCH_INTERNAL_ASSERT(src.is_sparse_csr()); TORCH_INTERNAL_ASSERT(dense.is_cuda()); TORCH_CHECK( output.is_contiguous(), "out argument must be contiguous, but got: ", output.suggest_memory_format()); TORCH_CHECK( output.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", output.device()); TORCH_CHECK( src.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got tensor on device: ", src.device()); TORCH_CHECK( dense.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", src.sizes(), " (FYI: op2-sparse addition does not currently support broadcasting)"); auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type()); TORCH_CHECK( canCast(commonDtype, output.scalar_type()), "Can't convert result type ", commonDtype, " to output ", 
output.scalar_type(), " in add operation"); Tensor src_values = src.values(); Tensor src_crow_indices = src.crow_indices(); Tensor src_col_indices = src.col_indices(); resize_output(output, dense.sizes()); Tensor resultBuffer = output; Tensor valuesBuffer = src_values.to(commonDtype); if (output.scalar_type() != commonDtype) { resultBuffer = dense.to(commonDtype); } else if (!is_same_tensor(output, dense)) { resultBuffer.copy_(dense); } AT_DISPATCH_ALL_TYPES( commonDtype, "add_out_op2_sparse_csr", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { AT_DISPATCH_INDEX_TYPES( src_crow_indices.scalar_type(), "csr_add_out_crow_indices", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>(); scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>(); scalar_t cast_value = alpha.to<scalar_t>(); index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>(); index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>(); int64_t out_storage_offset = resultBuffer.storage_offset(); auto out_strides = resultBuffer.strides(); int64_t out_strides0 = out_strides[0]; int64_t out_strides1 = out_strides[1]; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows. 
thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(src_crow_indices.size(0) - 1)), [values_accessor, crow_indices_accessor, col_indices_accessor, out_ptr, out_storage_offset, out_strides0, cast_value, out_strides1 ]__device__(int64_t irow) { index_t start_index = crow_indices_accessor[irow]; index_t end_index = crow_indices_accessor[irow + 1]; for (index_t i = start_index; i < end_index; ++i) { auto icol = col_indices_accessor[i]; auto index = out_storage_offset + irow * out_strides0 + icol * out_strides1; out_ptr[index] += cast_value * values_accessor[i]; } }); }); }); if (output.scalar_type() != commonDtype) { output.copy_(resultBuffer); } return output; } Tensor& add_out_sparse_csr_cuda( const Tensor& self, const SparseCsrTensor& other, const Scalar& alpha, SparseCsrTensor& out) { if (self.layout() == kStrided) { return add_out_dense_sparse_csr_cuda(out, self, other, alpha); } else { TORCH_CHECK( false, "NotImplementedError: Addition of sparse CSR tensors is not yet implemented.") } return out; } } // namespace native } // namespace at
8b6cf3aa077eba11e2554729d2630473d7bfa39d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "naive.h" #include <iostream> #define blockSize 256 dim3 threadsPerBlock(blockSize); int *dev_data1; int *dev_data2; namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernScan(int n, int *odata, int *idata, int d, int powd_min1){ int index = (blockDim.x * blockIdx.x) + threadIdx.x; int offset = powd_min1; // if d == 1, shift array over to accomodate first 0 int outIndex = d == 1 ? index + 1 : index; // if outIndex >= n, return (skips last elem on first run) if (outIndex >= n) { return; } // if first elem, should be 0 if (index == 0 && d == 1) { odata[index] = 0; } if (index >= offset){ odata[outIndex] = idata[index - offset] + idata[index]; } else{ odata[outIndex] = idata[index]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // allocate memory dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); hipMalloc((void**)&dev_data1, n * sizeof(int)); hipMalloc((void**)&dev_data2, n * sizeof(int)); hipMemcpy(dev_data1, idata, n * sizeof(int), hipMemcpyHostToDevice); timer().startGpuTimer(); // for log iterations, perform scan for (int d = 1; d < ilog2ceil(n) + 1; d++){ int powd_min1 = pow(2, d - 1); kernScan << <fullBlocksPerGrid, n >> > (n, dev_data2, dev_data1, d, powd_min1); // ping-pong int *tmp = dev_data1; dev_data1 = dev_data2; dev_data2 = tmp; } timer().endGpuTimer(); // copy to odata to return hipMemcpy(odata, dev_data1, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_data1); hipFree(dev_data2); } } }
8b6cf3aa077eba11e2554729d2630473d7bfa39d.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "naive.h" #include <iostream> #define blockSize 256 dim3 threadsPerBlock(blockSize); int *dev_data1; int *dev_data2; namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernScan(int n, int *odata, int *idata, int d, int powd_min1){ int index = (blockDim.x * blockIdx.x) + threadIdx.x; int offset = powd_min1; // if d == 1, shift array over to accomodate first 0 int outIndex = d == 1 ? index + 1 : index; // if outIndex >= n, return (skips last elem on first run) if (outIndex >= n) { return; } // if first elem, should be 0 if (index == 0 && d == 1) { odata[index] = 0; } if (index >= offset){ odata[outIndex] = idata[index - offset] + idata[index]; } else{ odata[outIndex] = idata[index]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // allocate memory dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_data1, n * sizeof(int)); cudaMalloc((void**)&dev_data2, n * sizeof(int)); cudaMemcpy(dev_data1, idata, n * sizeof(int), cudaMemcpyHostToDevice); timer().startGpuTimer(); // for log iterations, perform scan for (int d = 1; d < ilog2ceil(n) + 1; d++){ int powd_min1 = pow(2, d - 1); kernScan << <fullBlocksPerGrid, n >> > (n, dev_data2, dev_data1, d, powd_min1); // ping-pong int *tmp = dev_data1; dev_data1 = dev_data2; dev_data2 = tmp; } timer().endGpuTimer(); // copy to odata to return cudaMemcpy(odata, dev_data1, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_data1); cudaFree(dev_data2); } } }
7c8fbbd4a6315dae6497f29676c7e03a8673b3f3.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "util.hpp" // CUDA kernel implementing axpy // y = y + alpha*x __global__ void axpy(int n, double alpha, const double* x, double* y) { int i = threadIdx.x + blockDim.x*blockIdx.x; if (i<n) { y[i] += alpha*x[i]; } } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 16); size_t n = 1 << pow; auto size_in_bytes = n * sizeof(double); hipInit(0); std::cout << "memcopy and daxpy test of size " << n << "\n"; double* x_device = malloc_device<double>(n); double* y_device = malloc_device<double>(n); double* x_host = malloc_host<double>(n, 1.5); double* y_host = malloc_host<double>(n, 3.0); double* y = malloc_host<double>(n, 0.0); // copy to device auto start = get_time(); copy_to_device<double>(x_host, x_device, n); copy_to_device<double>(y_host, y_device, n); auto time_H2D = get_time() - start; // calculate grid dimensions int block_dim = 128; int num_blocks = (n+block_dim-1)/block_dim; // synchronize the host and device so that the timings are accurate hipDeviceSynchronize(); start = get_time(); // launch kernel hipLaunchKernelGGL(( axpy), dim3(num_blocks), dim3(block_dim), 0, 0, n, 2.0, x_device, y_device); hipDeviceSynchronize(); auto time_axpy = get_time() - start; // check for error in last kernel call cuda_check_last_kernel("axpy kernel"); // copy result back to host start = get_time(); copy_to_host<double>(y_device, y, n); auto time_D2H = get_time() - start; std::cout << "-------\ntimings\n-------\n"; std::cout << "H2D : " << time_H2D << " s\n"; std::cout << "D2H : " << time_D2H << " s\n"; std::cout << "axpy : " << time_axpy << " s\n"; std::cout << std::endl; std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s\n"; std::cout << std::endl; std::cout << "-------\nbandwidth\n-------\n"; auto H2D_BW = size_in_bytes/1e6*2 / time_H2D; auto D2H_BW = size_in_bytes/1e6 / time_D2H; std::cout << "H2D BW : " << H2D_BW << " MB/s\n"; 
std::cout << "D2H BW : " << D2H_BW << " MB/s\n"; // check for errors auto errors = 0; for(auto i=0; i<n; ++i) { if(::fabs(6.-y[i])>1e-15) { ++errors; } } std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n"; hipFree(x_device); hipFree(y_device); free(x_host); free(y_host); free(y); return 0; }
7c8fbbd4a6315dae6497f29676c7e03a8673b3f3.cu
#include <iostream> #include <cuda.h> #include "util.hpp" // CUDA kernel implementing axpy // y = y + alpha*x __global__ void axpy(int n, double alpha, const double* x, double* y) { int i = threadIdx.x + blockDim.x*blockIdx.x; if (i<n) { y[i] += alpha*x[i]; } } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 16); size_t n = 1 << pow; auto size_in_bytes = n * sizeof(double); cuInit(0); std::cout << "memcopy and daxpy test of size " << n << "\n"; double* x_device = malloc_device<double>(n); double* y_device = malloc_device<double>(n); double* x_host = malloc_host<double>(n, 1.5); double* y_host = malloc_host<double>(n, 3.0); double* y = malloc_host<double>(n, 0.0); // copy to device auto start = get_time(); copy_to_device<double>(x_host, x_device, n); copy_to_device<double>(y_host, y_device, n); auto time_H2D = get_time() - start; // calculate grid dimensions int block_dim = 128; int num_blocks = (n+block_dim-1)/block_dim; // synchronize the host and device so that the timings are accurate cudaDeviceSynchronize(); start = get_time(); // launch kernel axpy<<<num_blocks, block_dim>>>(n, 2.0, x_device, y_device); cudaDeviceSynchronize(); auto time_axpy = get_time() - start; // check for error in last kernel call cuda_check_last_kernel("axpy kernel"); // copy result back to host start = get_time(); copy_to_host<double>(y_device, y, n); auto time_D2H = get_time() - start; std::cout << "-------\ntimings\n-------\n"; std::cout << "H2D : " << time_H2D << " s\n"; std::cout << "D2H : " << time_D2H << " s\n"; std::cout << "axpy : " << time_axpy << " s\n"; std::cout << std::endl; std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s\n"; std::cout << std::endl; std::cout << "-------\nbandwidth\n-------\n"; auto H2D_BW = size_in_bytes/1e6*2 / time_H2D; auto D2H_BW = size_in_bytes/1e6 / time_D2H; std::cout << "H2D BW : " << H2D_BW << " MB/s\n"; std::cout << "D2H BW : " << D2H_BW << " MB/s\n"; // check for errors auto errors = 0; for(auto i=0; i<n; ++i) 
{ if(std::fabs(6.-y[i])>1e-15) { ++errors; } } std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n"; cudaFree(x_device); cudaFree(y_device); free(x_host); free(y_host); free(y); return 0; }
1fa329b976eddf6ba45cd7064c6c8803bcb8a1cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" typedef struct { int ** p; int size; } matrix; int ** Alloc(int size) { return allocMatrix(size); } __global__ void mul_gpu(int *A, int *B, int *C, int size) { // A, B jsou vstupni matice // C je vystupni matice // size je dim A int g = blockIdx.x*1024 + threadIdx.x; //int block = blockIdx.x; // int thread = threadIdx.x; int x = g/size; int y = g % size; // printf("Hello %dx%d\n",block, thread); int tmp = 0; for (int i = 0; i < size; i++) { // tmp += A[block*size + i] * B[i*size + thread]; tmp += A[x*size + i] * B[i*size + y]; } // vystup C[x*size + y] = tmp; // synchronizace pred prepnutim -- jinak dava spatny vysledek? __syncthreads(); } __global__ void add_gpu(int *A, int *B, int *C, int size) { // A, B jsou vstupni matice // C je vystupni matice // size je dim A int g = blockIdx.x*1024 + threadIdx.x; // vystup if ( g < size ) { C[g] = A[g] + B[g]; } __syncthreads(); } __global__ void sub_gpu(int *A, int *B, int *C, int size) { // A, B jsou vstupni matice // C je vystupni matice // size je dim A int g = blockIdx.x*1024 + threadIdx.x; // vystup if ( g < size ) { C[g] = A[g] - B[g]; } __syncthreads(); } matrix multM(matrix a, matrix b) { matrix c; c.p = Alloc(a.size); c.size = a.size; for (int i = 0; i < a.size; i++) { for (int j = 0; j < a.size; j++) { int result = 0; for (int k = 0; k < a.size; k++) { result += a.p[i][k] * b.p[k][j]; } c.p[i][j] = result; } } return c; } matrix subM(matrix a, matrix b) { matrix c; c.p = Alloc(a.size); c.size = a.size; for (int i = 0 ; i < a.size ; i++) { for (int j = 0 ; j < a.size ; j++) { c.p[i][j] = a.p[i][j] - b.p[i][j]; } } return c; } matrix addM(matrix a, matrix b) { matrix c; c.p = Alloc(a.size); c.size = a.size; for (int i = 0 ; i < a.size ; i++) { for (int j = 0 ; j < a.size ; j++) { c.p[i][j] = a.p[i][j] + b.p[i][j]; } } return c; } matrix getPart(int f1, int f2, matrix x) { matrix c; c.p = Alloc(x.size/2); c.size = x.size/2; int 
xstart = f1 * c.size ; int ystart = f2 * c.size ; for (int i = 0 ; i < c.size ; i++) { for (int j = 0 ; j < c.size ; j++) { c.p[i][j] = x.p[i + xstart][j + ystart]; } } return c; } void setPart(int f1, int f2, matrix *target, matrix source) { int xstart = f1 * source.size ; int ystart = f2 * source.size ; for (int i = 0 ; i < source.size ; i++) { for (int j = 0 ; j < source.size ; j++) { target->p[i + xstart][j + ystart] = source.p[i][j]; } } } void cleanM(matrix x) { for (int i=0; i<x.size; i++) { delete[] (x.p[i]); } delete[](x.p); } matrix s_alg(matrix a, matrix b) { // mereni clock_t start, end; clock_t start_gpu, end_gpu; start = clock(); int size = a.size/2; // nastaveni spusteni int gx = ((size*size)/1024 + 1); int bx = 1024; dim3 grid(gx, 1, 1); dim3 block(bx, 1, 1); // pocatecni rozdeleni matrix a11 = getPart(0, 0, a); matrix a12 = getPart(0, 1, a); matrix a21 = getPart(1, 0, a); matrix a22 = getPart(1, 1, a); matrix b11 = getPart(0, 0, b); matrix b12 = getPart(0, 1, b); matrix b21 = getPart(1, 0, b); matrix b22 = getPart(1, 1, b); int *cuda_a11, *cuda_a12, *cuda_a21, *cuda_a22, *cuda_b11, *cuda_b12, *cuda_b21, *cuda_b22; hipMalloc((void**)&cuda_a11, sizeof(int)*size*size); hipMalloc((void**)&cuda_a12, sizeof(int)*size*size); hipMalloc((void**)&cuda_a21, sizeof(int)*size*size); hipMalloc((void**)&cuda_a22, sizeof(int)*size*size); hipMalloc((void**)&cuda_b11, sizeof(int)*size*size); hipMalloc((void**)&cuda_b12, sizeof(int)*size*size); hipMalloc((void**)&cuda_b21, sizeof(int)*size*size); hipMalloc((void**)&cuda_b22, sizeof(int)*size*size); // dostanu 2*4 matice do GPU pameti for (int i = 0; i < size; i++) { hipMemcpy(&cuda_a11[i*size], a11.p[i], sizeof(int)*size, hipMemcpyHostToDevice); hipMemcpy(&cuda_a12[i*size], a12.p[i], sizeof(int)*size, hipMemcpyHostToDevice); hipMemcpy(&cuda_a21[i*size], a21.p[i], sizeof(int)*size, hipMemcpyHostToDevice); hipMemcpy(&cuda_a22[i*size], a22.p[i], sizeof(int)*size, hipMemcpyHostToDevice); hipMemcpy(&cuda_b11[i*size], 
b11.p[i], sizeof(int)*size, hipMemcpyHostToDevice); hipMemcpy(&cuda_b12[i*size], b12.p[i], sizeof(int)*size, hipMemcpyHostToDevice); hipMemcpy(&cuda_b21[i*size], b21.p[i], sizeof(int)*size, hipMemcpyHostToDevice); hipMemcpy(&cuda_b22[i*size], b22.p[i], sizeof(int)*size, hipMemcpyHostToDevice); } // toto uz nepotrebuji na CPU cleanM(a11); cleanM(a12); cleanM(a21); cleanM(a22); cleanM(b11); cleanM(b12); cleanM(b21); cleanM(b22); // inicializace int *cuda_t1, *cuda_t2, *cuda_m1, *cuda_t3, *cuda_m2, *cuda_t4, *cuda_m3, *cuda_t5, *cuda_m4, *cuda_t6, *cuda_m5, *cuda_t7, *cuda_t8, *cuda_m6, *cuda_t9, *cuda_t10, *cuda_m7; // a alokace pameti pro pomocne matice hipMalloc((void**)&cuda_t1, sizeof(int)*size*size); hipMalloc((void**)&cuda_t2, sizeof(int)*size*size); hipMalloc((void**)&cuda_m1, sizeof(int)*size*size); hipMalloc((void**)&cuda_t3, sizeof(int)*size*size); hipMalloc((void**)&cuda_m2, sizeof(int)*size*size); hipMalloc((void**)&cuda_t4, sizeof(int)*size*size); hipMalloc((void**)&cuda_m3, sizeof(int)*size*size); hipMalloc((void**)&cuda_t5, sizeof(int)*size*size); hipMalloc((void**)&cuda_m4, sizeof(int)*size*size); hipMalloc((void**)&cuda_t6, sizeof(int)*size*size); hipMalloc((void**)&cuda_m5, sizeof(int)*size*size); hipMalloc((void**)&cuda_t7, sizeof(int)*size*size); hipMalloc((void**)&cuda_t8, sizeof(int)*size*size); hipMalloc((void**)&cuda_m6, sizeof(int)*size*size); hipMalloc((void**)&cuda_t9, sizeof(int)*size*size); hipMalloc((void**)&cuda_t10, sizeof(int)*size*size); hipMalloc((void**)&cuda_m7, sizeof(int)*size*size); start_gpu = clock(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_a11, cuda_a22, cuda_t1, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_b11, cuda_b22, cuda_t2, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_a21, cuda_a22, cuda_t3, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( sub_gpu), dim3(grid), dim3(block) , 0, 0, 
cuda_b12, cuda_b22, cuda_t4, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( sub_gpu), dim3(grid), dim3(block) , 0, 0, cuda_b21, cuda_b11, cuda_t5, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_a11, cuda_a12, cuda_t6, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( sub_gpu), dim3(grid), dim3(block) , 0, 0, cuda_a21, cuda_a11, cuda_t7, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_b11, cuda_b12, cuda_t8, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( sub_gpu), dim3(grid), dim3(block) , 0, 0, cuda_a12, cuda_a22, cuda_t9, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_b21, cuda_b22, cuda_t10, size); hipDeviceSynchronize(); // matrix t1 = addM(a11, a22); // matrix t2 = addM(b11, b22); // matrix t3 = addM(a21, a22); // matrix t4 = subM(b12, b22); // matrix t5 = subM(b21, b11); // matrix t6 = addM(a11, a12); // matrix t7 = subM(a21, a11); // matrix t8 = addM(b11, b12); // matrix t9 = subM(a12, a22); // matrix t10 = addM(b21, b22); //cout << "po alokaci" << endl << flush; // for (int i = 0; i < size; i++) { // hipMemcpy(&cuda_t1[i*size], t1.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_t2[i*size], t2.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_t3[i*size], t3.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_b11[i*size], b11.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_a11[i*size], a11.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_t4[i*size], t4.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_a22[i*size], a22.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_t5[i*size], t5.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_t6[i*size], t6.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_b22[i*size], b22.p[i], sizeof(int)*size, 
hipMemcpyHostToDevice); // hipMemcpy(&cuda_t7[i*size], t7.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_t8[i*size], t8.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_t9[i*size], t9.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // hipMemcpy(&cuda_t10[i*size], t10.p[i], sizeof(int)*size, hipMemcpyHostToDevice); // } //cout << "po memcpy" << endl << flush; // matrix m1 = s_alg(t1, t2); // matrix m2 = s_alg(t3, b11); // matrix m3 = s_alg(a11, t4); // matrix m4 = s_alg(a22, t5); // matrix m5 = s_alg(t6, b22); // matrix m6 = s_alg(t7, t8); // matrix m7 = s_alg(t9, t10); hipLaunchKernelGGL(( mul_gpu), dim3(grid), dim3(block) , 0, 0, cuda_t1, cuda_t2, cuda_m1, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( mul_gpu), dim3(grid), dim3(block) , 0, 0, cuda_t3, cuda_b11, cuda_m2, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( mul_gpu), dim3(grid), dim3(block) , 0, 0, cuda_a11, cuda_t4, cuda_m3, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( mul_gpu), dim3(grid), dim3(block) , 0, 0, cuda_a22, cuda_t5, cuda_m4, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( mul_gpu), dim3(grid), dim3(block) , 0, 0, cuda_t6, cuda_b22, cuda_m5, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( mul_gpu), dim3(grid), dim3(block) , 0, 0, cuda_t7, cuda_t8, cuda_m6, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( mul_gpu), dim3(grid), dim3(block) , 0, 0, cuda_t9, cuda_t10, cuda_m7, size); hipDeviceSynchronize(); // matrix m1, m2, m3, m4, m5, m6, m7; // m1.p = Alloc(size); // m1.size = size; // m2.p = Alloc(size); // m2.size = size; // m3.p = Alloc(size); // m3.size = size; // m4.p = Alloc(size); // m4.size = size; // m5.p = Alloc(size); // m5.size = size; // m6.p = Alloc(size); // m6.size = size; // m7.p = Alloc(size); // m7.size = size; // for (int i = 0; i < size; i++) { // // cout << "pruchod: " << i << flush << endl; // hipMemcpy(m1.p[i], &cuda_m1[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); // hipMemcpy(m2.p[i], 
&cuda_m2[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); // hipMemcpy(m3.p[i], &cuda_m3[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); // hipMemcpy(m4.p[i], &cuda_m4[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); // hipMemcpy(m5.p[i], &cuda_m5[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); // hipMemcpy(m6.p[i], &cuda_m6[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); // hipMemcpy(m7.p[i], &cuda_m7[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); // } // hipDeviceSynchronize(); // printMatrix(m1.p, m1.size); // printMatrix(m2.p, m2.size); // printMatrix(m3.p, m3.size); // printMatrix(m4.p, m4.size); // printMatrix(m5.p, m5.size); // printMatrix(m6.p, m6.size); // printMatrix(m7.p, m7.size); // ****************************** // pokracuji normalne // cleanM(t1); // cleanM(t2); // cleanM(t3); // cleanM(t4); // cleanM(t5); // cleanM(t6); // cleanM(t7); // cleanM(t8); // cleanM(t9); // cleanM(t10); hipFree(cuda_t1); hipFree(cuda_t2); hipFree(cuda_t3); hipFree(cuda_b11); hipFree(cuda_a11); hipFree(cuda_t4); hipFree(cuda_a22); hipFree(cuda_t5); hipFree(cuda_t6); hipFree(cuda_b22); hipFree(cuda_t7); hipFree(cuda_t8); hipFree(cuda_t9); hipFree(cuda_t10); matrix c; c.p = Alloc(a.size); c.size = a.size; int *cuda_rx1,*cuda_rx2, *cuda_rx3, *cuda_r2, *cuda_r3, *cuda_ry1, *cuda_ry2, *cuda_ry3; hipMalloc((void**)&cuda_rx1, sizeof(int)*size*size); hipMalloc((void**)&cuda_rx2, sizeof(int)*size*size); hipMalloc((void**)&cuda_rx3, sizeof(int)*size*size); hipMalloc((void**)&cuda_r2, sizeof(int)*size*size); hipMalloc((void**)&cuda_r3, sizeof(int)*size*size); hipMalloc((void**)&cuda_ry1, sizeof(int)*size*size); hipMalloc((void**)&cuda_ry2, sizeof(int)*size*size); hipMalloc((void**)&cuda_ry3, sizeof(int)*size*size); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_m1, cuda_m4, cuda_rx1, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_rx1, cuda_m7, cuda_rx2, size); hipDeviceSynchronize(); 
hipLaunchKernelGGL(( sub_gpu), dim3(grid), dim3(block) , 0, 0, cuda_rx2, cuda_m5, cuda_rx3, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_m3, cuda_m5, cuda_r2, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_m2, cuda_m4, cuda_r3, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( sub_gpu), dim3(grid), dim3(block) , 0, 0, cuda_m1, cuda_m2, cuda_ry1, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_ry1, cuda_m3, cuda_ry2, size); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_gpu), dim3(grid), dim3(block) , 0, 0, cuda_ry2, cuda_m6, cuda_ry3, size); hipDeviceSynchronize(); // matrix rx1 = addM(m1, m4); // matrix rx2 = addM(rx1, m7); // matrix rx3 = subM(rx2, m5); // // matrix r2 = addM(m3, m5); // matrix r3 = addM(m2, m4); // // matrix ry1 = subM(m1, m2); // matrix ry2 = addM(ry1, m3); // matrix ry3 = addM(ry2, m6); end_gpu = clock(); hipFree(cuda_m1); hipFree(cuda_m2); hipFree(cuda_m3); hipFree(cuda_m4); hipFree(cuda_m5); hipFree(cuda_m6); hipFree(cuda_m7); matrix rx3, r2, r3, ry3; rx3.p = Alloc(size); rx3.size = size; r2.p = Alloc(size); r2.size = size; r3.p = Alloc(size); r3.size = size; ry3.p = Alloc(size); ry3.size = size; for (int i = 0; i < size; i++) { hipMemcpy(rx3.p[i], &cuda_rx3[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); hipMemcpy(r2.p[i], &cuda_r2[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); hipMemcpy(r3.p[i], &cuda_r3[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); hipMemcpy(ry3.p[i], &cuda_ry3[i*size], sizeof(int)*size, hipMemcpyDeviceToHost); } setPart(0, 0, &c, rx3); setPart(0, 1, &c, r2); setPart(1, 0, &c, r3); setPart(1, 1, &c, ry3); // cleanM(m1); // cleanM(m2); // cleanM(m3); // cleanM(m4); // cleanM(m5); // cleanM(m6); // cleanM(m7); // cleanM(rx1); // cleanM(rx2); cleanM(rx3); cleanM(r2); cleanM(r3); // cleanM(ry1); // cleanM(ry2); cleanM(ry3); // vypis mereni end = clock(); 
cout << "Running for " << (double)(end-start)/CLOCKS_PER_SEC << endl << flush; cout << "GPU running for " << (double)(end_gpu-start_gpu)/CLOCKS_PER_SEC << endl << flush; return c; } // strassen algorithm int ** strassen(int size, int ** A, int ** B) { matrix a; a.p = A; a.size = size; matrix b; b.p = B; b.size = size; matrix c = s_alg(a, b); return c.p; }
1fa329b976eddf6ba45cd7064c6c8803bcb8a1cb.cu
typedef struct { int ** p; int size; } matrix; int ** Alloc(int size) { return allocMatrix(size); } __global__ void mul_gpu(int *A, int *B, int *C, int size) { // A, B jsou vstupni matice // C je vystupni matice // size je dim A int g = blockIdx.x*1024 + threadIdx.x; //int block = blockIdx.x; // int thread = threadIdx.x; int x = g/size; int y = g % size; // printf("Hello %dx%d\n",block, thread); int tmp = 0; for (int i = 0; i < size; i++) { // tmp += A[block*size + i] * B[i*size + thread]; tmp += A[x*size + i] * B[i*size + y]; } // vystup C[x*size + y] = tmp; // synchronizace pred prepnutim -- jinak dava spatny vysledek? __syncthreads(); } __global__ void add_gpu(int *A, int *B, int *C, int size) { // A, B jsou vstupni matice // C je vystupni matice // size je dim A int g = blockIdx.x*1024 + threadIdx.x; // vystup if ( g < size ) { C[g] = A[g] + B[g]; } __syncthreads(); } __global__ void sub_gpu(int *A, int *B, int *C, int size) { // A, B jsou vstupni matice // C je vystupni matice // size je dim A int g = blockIdx.x*1024 + threadIdx.x; // vystup if ( g < size ) { C[g] = A[g] - B[g]; } __syncthreads(); } matrix multM(matrix a, matrix b) { matrix c; c.p = Alloc(a.size); c.size = a.size; for (int i = 0; i < a.size; i++) { for (int j = 0; j < a.size; j++) { int result = 0; for (int k = 0; k < a.size; k++) { result += a.p[i][k] * b.p[k][j]; } c.p[i][j] = result; } } return c; } matrix subM(matrix a, matrix b) { matrix c; c.p = Alloc(a.size); c.size = a.size; for (int i = 0 ; i < a.size ; i++) { for (int j = 0 ; j < a.size ; j++) { c.p[i][j] = a.p[i][j] - b.p[i][j]; } } return c; } matrix addM(matrix a, matrix b) { matrix c; c.p = Alloc(a.size); c.size = a.size; for (int i = 0 ; i < a.size ; i++) { for (int j = 0 ; j < a.size ; j++) { c.p[i][j] = a.p[i][j] + b.p[i][j]; } } return c; } matrix getPart(int f1, int f2, matrix x) { matrix c; c.p = Alloc(x.size/2); c.size = x.size/2; int xstart = f1 * c.size ; int ystart = f2 * c.size ; for (int i = 0 ; i < c.size ; i++) { 
for (int j = 0 ; j < c.size ; j++) { c.p[i][j] = x.p[i + xstart][j + ystart]; } } return c; } void setPart(int f1, int f2, matrix *target, matrix source) { int xstart = f1 * source.size ; int ystart = f2 * source.size ; for (int i = 0 ; i < source.size ; i++) { for (int j = 0 ; j < source.size ; j++) { target->p[i + xstart][j + ystart] = source.p[i][j]; } } } void cleanM(matrix x) { for (int i=0; i<x.size; i++) { delete[] (x.p[i]); } delete[](x.p); } matrix s_alg(matrix a, matrix b) { // mereni clock_t start, end; clock_t start_gpu, end_gpu; start = clock(); int size = a.size/2; // nastaveni spusteni int gx = ((size*size)/1024 + 1); int bx = 1024; dim3 grid(gx, 1, 1); dim3 block(bx, 1, 1); // pocatecni rozdeleni matrix a11 = getPart(0, 0, a); matrix a12 = getPart(0, 1, a); matrix a21 = getPart(1, 0, a); matrix a22 = getPart(1, 1, a); matrix b11 = getPart(0, 0, b); matrix b12 = getPart(0, 1, b); matrix b21 = getPart(1, 0, b); matrix b22 = getPart(1, 1, b); int *cuda_a11, *cuda_a12, *cuda_a21, *cuda_a22, *cuda_b11, *cuda_b12, *cuda_b21, *cuda_b22; cudaMalloc((void**)&cuda_a11, sizeof(int)*size*size); cudaMalloc((void**)&cuda_a12, sizeof(int)*size*size); cudaMalloc((void**)&cuda_a21, sizeof(int)*size*size); cudaMalloc((void**)&cuda_a22, sizeof(int)*size*size); cudaMalloc((void**)&cuda_b11, sizeof(int)*size*size); cudaMalloc((void**)&cuda_b12, sizeof(int)*size*size); cudaMalloc((void**)&cuda_b21, sizeof(int)*size*size); cudaMalloc((void**)&cuda_b22, sizeof(int)*size*size); // dostanu 2*4 matice do GPU pameti for (int i = 0; i < size; i++) { cudaMemcpy(&cuda_a11[i*size], a11.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); cudaMemcpy(&cuda_a12[i*size], a12.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); cudaMemcpy(&cuda_a21[i*size], a21.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); cudaMemcpy(&cuda_a22[i*size], a22.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); cudaMemcpy(&cuda_b11[i*size], b11.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); 
cudaMemcpy(&cuda_b12[i*size], b12.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); cudaMemcpy(&cuda_b21[i*size], b21.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); cudaMemcpy(&cuda_b22[i*size], b22.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); } // toto uz nepotrebuji na CPU cleanM(a11); cleanM(a12); cleanM(a21); cleanM(a22); cleanM(b11); cleanM(b12); cleanM(b21); cleanM(b22); // inicializace int *cuda_t1, *cuda_t2, *cuda_m1, *cuda_t3, *cuda_m2, *cuda_t4, *cuda_m3, *cuda_t5, *cuda_m4, *cuda_t6, *cuda_m5, *cuda_t7, *cuda_t8, *cuda_m6, *cuda_t9, *cuda_t10, *cuda_m7; // a alokace pameti pro pomocne matice cudaMalloc((void**)&cuda_t1, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t2, sizeof(int)*size*size); cudaMalloc((void**)&cuda_m1, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t3, sizeof(int)*size*size); cudaMalloc((void**)&cuda_m2, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t4, sizeof(int)*size*size); cudaMalloc((void**)&cuda_m3, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t5, sizeof(int)*size*size); cudaMalloc((void**)&cuda_m4, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t6, sizeof(int)*size*size); cudaMalloc((void**)&cuda_m5, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t7, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t8, sizeof(int)*size*size); cudaMalloc((void**)&cuda_m6, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t9, sizeof(int)*size*size); cudaMalloc((void**)&cuda_t10, sizeof(int)*size*size); cudaMalloc((void**)&cuda_m7, sizeof(int)*size*size); start_gpu = clock(); add_gpu<<< grid, block >>>(cuda_a11, cuda_a22, cuda_t1, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_b11, cuda_b22, cuda_t2, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_a21, cuda_a22, cuda_t3, size); cudaThreadSynchronize(); sub_gpu<<< grid, block >>>(cuda_b12, cuda_b22, cuda_t4, size); cudaThreadSynchronize(); sub_gpu<<< grid, block >>>(cuda_b21, cuda_b11, cuda_t5, size); cudaThreadSynchronize(); add_gpu<<< grid, 
block >>>(cuda_a11, cuda_a12, cuda_t6, size); cudaThreadSynchronize(); sub_gpu<<< grid, block >>>(cuda_a21, cuda_a11, cuda_t7, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_b11, cuda_b12, cuda_t8, size); cudaThreadSynchronize(); sub_gpu<<< grid, block >>>(cuda_a12, cuda_a22, cuda_t9, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_b21, cuda_b22, cuda_t10, size); cudaThreadSynchronize(); // matrix t1 = addM(a11, a22); // matrix t2 = addM(b11, b22); // matrix t3 = addM(a21, a22); // matrix t4 = subM(b12, b22); // matrix t5 = subM(b21, b11); // matrix t6 = addM(a11, a12); // matrix t7 = subM(a21, a11); // matrix t8 = addM(b11, b12); // matrix t9 = subM(a12, a22); // matrix t10 = addM(b21, b22); //cout << "po alokaci" << endl << flush; // for (int i = 0; i < size; i++) { // cudaMemcpy(&cuda_t1[i*size], t1.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t2[i*size], t2.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t3[i*size], t3.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_b11[i*size], b11.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_a11[i*size], a11.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t4[i*size], t4.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_a22[i*size], a22.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t5[i*size], t5.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t6[i*size], t6.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_b22[i*size], b22.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t7[i*size], t7.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t8[i*size], t8.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t9[i*size], t9.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); // cudaMemcpy(&cuda_t10[i*size], t10.p[i], sizeof(int)*size, cudaMemcpyHostToDevice); 
// } //cout << "po memcpy" << endl << flush; // matrix m1 = s_alg(t1, t2); // matrix m2 = s_alg(t3, b11); // matrix m3 = s_alg(a11, t4); // matrix m4 = s_alg(a22, t5); // matrix m5 = s_alg(t6, b22); // matrix m6 = s_alg(t7, t8); // matrix m7 = s_alg(t9, t10); mul_gpu<<< grid, block >>>(cuda_t1, cuda_t2, cuda_m1, size); cudaThreadSynchronize(); mul_gpu<<< grid, block >>>(cuda_t3, cuda_b11, cuda_m2, size); cudaThreadSynchronize(); mul_gpu<<< grid, block >>>(cuda_a11, cuda_t4, cuda_m3, size); cudaThreadSynchronize(); mul_gpu<<< grid, block >>>(cuda_a22, cuda_t5, cuda_m4, size); cudaThreadSynchronize(); mul_gpu<<< grid, block >>>(cuda_t6, cuda_b22, cuda_m5, size); cudaThreadSynchronize(); mul_gpu<<< grid, block >>>(cuda_t7, cuda_t8, cuda_m6, size); cudaThreadSynchronize(); mul_gpu<<< grid, block >>>(cuda_t9, cuda_t10, cuda_m7, size); cudaThreadSynchronize(); // matrix m1, m2, m3, m4, m5, m6, m7; // m1.p = Alloc(size); // m1.size = size; // m2.p = Alloc(size); // m2.size = size; // m3.p = Alloc(size); // m3.size = size; // m4.p = Alloc(size); // m4.size = size; // m5.p = Alloc(size); // m5.size = size; // m6.p = Alloc(size); // m6.size = size; // m7.p = Alloc(size); // m7.size = size; // for (int i = 0; i < size; i++) { // // cout << "pruchod: " << i << flush << endl; // cudaMemcpy(m1.p[i], &cuda_m1[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); // cudaMemcpy(m2.p[i], &cuda_m2[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); // cudaMemcpy(m3.p[i], &cuda_m3[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); // cudaMemcpy(m4.p[i], &cuda_m4[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); // cudaMemcpy(m5.p[i], &cuda_m5[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); // cudaMemcpy(m6.p[i], &cuda_m6[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); // cudaMemcpy(m7.p[i], &cuda_m7[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); // } // cudaThreadSynchronize(); // printMatrix(m1.p, m1.size); // printMatrix(m2.p, m2.size); // printMatrix(m3.p, m3.size); 
// printMatrix(m4.p, m4.size); // printMatrix(m5.p, m5.size); // printMatrix(m6.p, m6.size); // printMatrix(m7.p, m7.size); // ****************************** // pokracuji normalne // cleanM(t1); // cleanM(t2); // cleanM(t3); // cleanM(t4); // cleanM(t5); // cleanM(t6); // cleanM(t7); // cleanM(t8); // cleanM(t9); // cleanM(t10); cudaFree(cuda_t1); cudaFree(cuda_t2); cudaFree(cuda_t3); cudaFree(cuda_b11); cudaFree(cuda_a11); cudaFree(cuda_t4); cudaFree(cuda_a22); cudaFree(cuda_t5); cudaFree(cuda_t6); cudaFree(cuda_b22); cudaFree(cuda_t7); cudaFree(cuda_t8); cudaFree(cuda_t9); cudaFree(cuda_t10); matrix c; c.p = Alloc(a.size); c.size = a.size; int *cuda_rx1,*cuda_rx2, *cuda_rx3, *cuda_r2, *cuda_r3, *cuda_ry1, *cuda_ry2, *cuda_ry3; cudaMalloc((void**)&cuda_rx1, sizeof(int)*size*size); cudaMalloc((void**)&cuda_rx2, sizeof(int)*size*size); cudaMalloc((void**)&cuda_rx3, sizeof(int)*size*size); cudaMalloc((void**)&cuda_r2, sizeof(int)*size*size); cudaMalloc((void**)&cuda_r3, sizeof(int)*size*size); cudaMalloc((void**)&cuda_ry1, sizeof(int)*size*size); cudaMalloc((void**)&cuda_ry2, sizeof(int)*size*size); cudaMalloc((void**)&cuda_ry3, sizeof(int)*size*size); add_gpu<<< grid, block >>>(cuda_m1, cuda_m4, cuda_rx1, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_rx1, cuda_m7, cuda_rx2, size); cudaThreadSynchronize(); sub_gpu<<< grid, block >>>(cuda_rx2, cuda_m5, cuda_rx3, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_m3, cuda_m5, cuda_r2, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_m2, cuda_m4, cuda_r3, size); cudaThreadSynchronize(); sub_gpu<<< grid, block >>>(cuda_m1, cuda_m2, cuda_ry1, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_ry1, cuda_m3, cuda_ry2, size); cudaThreadSynchronize(); add_gpu<<< grid, block >>>(cuda_ry2, cuda_m6, cuda_ry3, size); cudaThreadSynchronize(); // matrix rx1 = addM(m1, m4); // matrix rx2 = addM(rx1, m7); // matrix rx3 = subM(rx2, m5); // // matrix r2 = addM(m3, m5); // 
matrix r3 = addM(m2, m4); // // matrix ry1 = subM(m1, m2); // matrix ry2 = addM(ry1, m3); // matrix ry3 = addM(ry2, m6); end_gpu = clock(); cudaFree(cuda_m1); cudaFree(cuda_m2); cudaFree(cuda_m3); cudaFree(cuda_m4); cudaFree(cuda_m5); cudaFree(cuda_m6); cudaFree(cuda_m7); matrix rx3, r2, r3, ry3; rx3.p = Alloc(size); rx3.size = size; r2.p = Alloc(size); r2.size = size; r3.p = Alloc(size); r3.size = size; ry3.p = Alloc(size); ry3.size = size; for (int i = 0; i < size; i++) { cudaMemcpy(rx3.p[i], &cuda_rx3[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); cudaMemcpy(r2.p[i], &cuda_r2[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); cudaMemcpy(r3.p[i], &cuda_r3[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); cudaMemcpy(ry3.p[i], &cuda_ry3[i*size], sizeof(int)*size, cudaMemcpyDeviceToHost); } setPart(0, 0, &c, rx3); setPart(0, 1, &c, r2); setPart(1, 0, &c, r3); setPart(1, 1, &c, ry3); // cleanM(m1); // cleanM(m2); // cleanM(m3); // cleanM(m4); // cleanM(m5); // cleanM(m6); // cleanM(m7); // cleanM(rx1); // cleanM(rx2); cleanM(rx3); cleanM(r2); cleanM(r3); // cleanM(ry1); // cleanM(ry2); cleanM(ry3); // vypis mereni end = clock(); cout << "Running for " << (double)(end-start)/CLOCKS_PER_SEC << endl << flush; cout << "GPU running for " << (double)(end_gpu-start_gpu)/CLOCKS_PER_SEC << endl << flush; return c; } // strassen algorithm int ** strassen(int size, int ** A, int ** B) { matrix a; a.p = A; a.size = size; matrix b; b.p = B; b.size = size; matrix c = s_alg(a, b); return c.p; }
5a0eb12b67ff5ff29c29c456a27df69110d89661.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/select_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SelectForward(const int n_threads, const Dtype* bottom_data, const Dtype* select_data, Dtype* top_data, const int cand_id, const int inner_dim) { CUDA_KERNEL_LOOP(index, n_threads) { const int i = index / inner_dim; const int select_id = static_cast<int>(select_data[i]); // DCHECK_GE(select_id, 0); // DCHECK_LT(select_id, num_cand); if (cand_id == select_id) { top_data[index] = bottom_data[index]; } } } template <typename Dtype> void SelectLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* select_data = bottom[num_cand_]->gpu_data(); const int n_threads = top[0]->count(); for (int i = 0; i < num_cand_; ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); SelectForward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n_threads, bottom_data, select_data, top_data, i, inner_dim_); } } template <typename Dtype> __global__ void SelectBackward(const int n_threads, const Dtype* top_diff, const Dtype* select_data, Dtype* bottom_diff, const int cand_id, const int inner_dim) { CUDA_KERNEL_LOOP(index, n_threads) { const int i = index / inner_dim; const int select_id = static_cast<int>(select_data[i]); if (cand_id == select_id) { bottom_diff[index] = top_diff[index]; } else { bottom_diff[index] = 0; } } } template <typename Dtype> void SelectLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[num_cand_]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to selection inputs."; } // if (!propagate_down[0]) { return; } const 
Dtype* top_diff = top[0]->gpu_diff(); const Dtype* select_data = bottom[num_cand_]->gpu_data(); const int n_threads = bottom[0]->count(); for (int i = 0; i < num_cand_; ++i) { if (!propagate_down[i]) { continue; } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); SelectBackward<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n_threads, top_diff, select_data, bottom_diff, i, inner_dim_); } } INSTANTIATE_LAYER_GPU_FUNCS(SelectLayer); } // namespace caffe
5a0eb12b67ff5ff29c29c456a27df69110d89661.cu
#include <vector> #include "caffe/layers/select_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SelectForward(const int n_threads, const Dtype* bottom_data, const Dtype* select_data, Dtype* top_data, const int cand_id, const int inner_dim) { CUDA_KERNEL_LOOP(index, n_threads) { const int i = index / inner_dim; const int select_id = static_cast<int>(select_data[i]); // DCHECK_GE(select_id, 0); // DCHECK_LT(select_id, num_cand); if (cand_id == select_id) { top_data[index] = bottom_data[index]; } } } template <typename Dtype> void SelectLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* select_data = bottom[num_cand_]->gpu_data(); const int n_threads = top[0]->count(); for (int i = 0; i < num_cand_; ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); SelectForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>( n_threads, bottom_data, select_data, top_data, i, inner_dim_); } } template <typename Dtype> __global__ void SelectBackward(const int n_threads, const Dtype* top_diff, const Dtype* select_data, Dtype* bottom_diff, const int cand_id, const int inner_dim) { CUDA_KERNEL_LOOP(index, n_threads) { const int i = index / inner_dim; const int select_id = static_cast<int>(select_data[i]); if (cand_id == select_id) { bottom_diff[index] = top_diff[index]; } else { bottom_diff[index] = 0; } } } template <typename Dtype> void SelectLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[num_cand_]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to selection inputs."; } // if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* select_data = bottom[num_cand_]->gpu_data(); const int n_threads = 
bottom[0]->count(); for (int i = 0; i < num_cand_; ++i) { if (!propagate_down[i]) { continue; } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); SelectBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>( n_threads, top_diff, select_data, bottom_diff, i, inner_dim_); } } INSTANTIATE_LAYER_GPU_FUNCS(SelectLayer); } // namespace caffe
50caeeb3959de467ffa6cfd2ddf3fd6839c7fae4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <omp.h> #include <assert.h> #include <iostream> #include <complex> #include <hip/hip_runtime.h> #include <rocblas.h> #include <hipfft.h> #define INTEL_C_COMPILER #if defined(GNU_C_COMPILER) extern "C" { #include "cblas.h" #include "lapacke.h" #include "lapacke_mangling.h" } #elif defined(INTEL_C_COMPILER) #include "fftw3.h" #include "mkl.h" #endif #define VERBOSITY using std::cout; using std::complex; #define EXIT_SUCCESS 0 #define EXIT_FAILURE 1 #define nullptr NULL #define SCHEME_RADIUS 4 #define BLOCK_X 32 #define BLOCK_Y 8 #define BLOCK_Z 1 #define MEM_PATTERN_X 32 #define THREAD_X 32 #define THREAD_Y 8 #define UNROLL 8 #define DEBUG #define safeCall(err) __safeCall(err, __FILE__, __LINE__) inline void __safeCall(hipError_t err, const char * file, const int line) { if(hipSuccess != err) { fprintf(stderr, "ERROR: safeCall() Runtime API error in file <%s>, line %i : %s.\n", file , line, hipGetErrorString(err)); exit(-1); } } class TimerGPU { public: hipEvent_t start, stop; hipStream_t stream; TimerGPU(hipStream_t stream_ = 0) : stream(stream_) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, stream); } ~TimerGPU() { hipEventDestroy(start); hipEventDestroy(stop); } float read() { hipEventRecord(stop, stream); hipEventSynchronize(stop); float time; hipEventElapsedTime(&time, start, stop); return time; } }; class TimerCPU { static const int bits = 10; public: long long beg_clock; float freq; TimerCPU(float freq_) : freq(freq_) { beg_clock = getTSC(bits); } long long getTSC(int bits) { #ifdef WIN32 return __rdtsc(); #else unsigned int low, high; __asm__(".byte 0x0f, 0x31" :"=a" (low), "=d" (high)); return ((long long)high<<(32 - bits)) | ((long long)low >> bits); #endif } float read() { long long end_clock = getTSC(bits); long long Kcycles = end_clock - beg_clock; float time = (float)(1 << bits) * Kcycles / freq / 1e3f; return time; } }; int 
iDivUp(int a, int b); int iDivDown(int a, int b); int iAlignUp(int a, int b); int iAlignDown(int a, int b); int iDivUp(int a, int b) { return (a % b == 0) ? (a / b) : (a / b + 1); } int iDivDown(int a, int b) { return a / b; } int iAlignUp(int a, int b) { return (a % b == 0) ? a : (a - a % b + b); } int iAlignDown(int a, int b) { return a - a % b; } __constant__ float coeff[SCHEME_RADIUS + 1]; static const float h_coeff[] = { -2.847222222, 1.6, -0.2, 0.02539682540, -0.001785714286}; template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> class Wavefield3d { public: Wavefield3d(); ~Wavefield3d(); void allocate(const int _nx, const int _ny, const int _nz, const int _radius, bool host, d_T * devmem, h_T * hostmem); double download(); double readback(); public: int nx, ny, nz; int nxpad, nypad, nzpad; int radius; int padding; h_T * h_wf; d_T * d_wf; bool h_internalAlloc; bool d_internalAlloc; }; #define ALIGNEDMENT_BITS 128 template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> void Wavefield3d<h_T, d_T, BX, BY, BZ>::allocate(const int _nx, const int _ny, const int _nz, const int _radius, bool host, d_T * devmem, h_T * hostmem) { nx = _nx; ny = _ny; nz = _nz; radius = _radius; nxpad = iAlignUp(nx - 2 * radius, BX) + 2 * radius; nzpad = iAlignUp(nz - 2 * radius, BZ) + 2 * radius; nypad = iAlignUp(ny - 2 * radius, BY) + 2 * radius; long long int volumeSize = ((long long int)nxpad) * nypad * nzpad; #ifdef VERBOSITY fprintf(stdout, "INFO: nx = %d, ny = %d, nz = %d.\n", nx, ny, nz); fprintf(stdout, "INFO: nxpad = %d, nypad = %d, nzpad = %d.\n", nxpad, nypad, nzpad); fflush(stdout); #endif padding = ALIGNEDMENT_BITS / sizeof(h_T) - radius; volumeSize += padding; h_wf = hostmem; d_wf = devmem; if(d_wf == nullptr) { if(volumeSize < 0) { fprintf(stderr, "ERROR: cannot allocate %lld bytes from device global memory, file: %s, line: %d\n", volumeSize * sizeof(d_T), __FILE__, __LINE__); d_wf = nullptr; exit(EXIT_FAILURE); } 
safeCall(hipMalloc((void**)&d_wf, volumeSize * sizeof(d_T))); safeCall(hipMemset(d_wf, 0, volumeSize * sizeof(d_T))); if(d_wf == nullptr) { fprintf(stderr, "ERROR: cannot allocate %lld bytes from device global memory, file: %s, line: %d\n", volumeSize * sizeof(d_T), __FILE__, __LINE__); } d_internalAlloc = true; } if(host && h_wf == nullptr) { long long int h_volumeSize = nx * ny * nz; if(h_volumeSize < 0) { fprintf(stderr, "ERROR: cannot allocate %lld bytes from host memory, file: %s, line: %d\n", h_volumeSize * sizeof(h_T), __FILE__, __LINE__); h_wf = nullptr; exit(EXIT_FAILURE); } h_wf = (float*)malloc(sizeof(h_T) * h_volumeSize); memset(h_wf, 0, h_volumeSize * sizeof(h_T)); h_internalAlloc = true; } } template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> Wavefield3d<h_T, d_T, BX, BY, BZ>::Wavefield3d() : nx(0), ny(0), nz(0), radius(0), h_wf(nullptr), d_wf(nullptr), h_internalAlloc(false), d_internalAlloc(false) { } template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> Wavefield3d<h_T, d_T, BX, BY, BZ>::~Wavefield3d() { if(h_internalAlloc && h_wf != nullptr) free(h_wf); h_wf = nullptr; if(d_internalAlloc && d_wf != nullptr) safeCall(hipFree(d_wf)); d_wf = nullptr; } template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> double Wavefield3d<h_T, d_T, BX, BY, BZ>::download() { TimerGPU timer(0); int stride_z = sizeof(d_T) * nzpad; int d_stride_y = nxpad * nzpad; int h_stride_y = nx * nz; if(h_wf != nullptr && d_wf != nullptr) { h_T * h_ptr = h_wf; d_T * d_ptr = d_wf + padding; for(int iy = 0; iy < ny; iy++) { safeCall(hipMemcpy2D(d_ptr, stride_z, h_ptr, sizeof(h_T) * nz, sizeof(h_T) * nz, nx, hipMemcpyHostToDevice)); h_ptr += h_stride_y; d_ptr += d_stride_y; } } double gpuTime = timer.read(); #ifdef VERBOSITY fprintf(stdout, "INFO: download time = %.2fms\n", gpuTime); fflush(stdout); #endif return gpuTime; } template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> double Wavefield3d<h_T, d_T, BX, BY, 
BZ>::readback() { TimerGPU timer(0); int stride_z = sizeof(d_T) * nzpad; int d_stride_y = nxpad * nzpad; int h_stride_y = nx * nz; if(h_wf != nullptr && d_wf != nullptr) { h_T * h_ptr = h_wf; d_T * d_ptr = d_wf + padding; for(int iy = 0; iy < ny; iy++) { safeCall(hipMemcpy2D(h_ptr, sizeof(h_T) * nz, d_ptr, stride_z, sizeof(d_T) * nz, nx, hipMemcpyDeviceToHost)); h_ptr += h_stride_y; d_ptr += d_stride_y; } } double gpuTime = timer.read(); #ifdef VERBOSITY fprintf(stdout, "INFO: download time = %.2fms\n", gpuTime); fflush(stdout); #endif return gpuTime; } // 2.5d tiling // DX >= RADIUS // DY >= RADIUS template<size_t BX, size_t BY, size_t TX, size_t TY, size_t DX, size_t DY, size_t RADIUS> __global__ static void fdtd3d_kernel_template(float * __restrict__ wf_next, float * __restrict__ wf_cur, const int stride_y, const int stride_z, const int ny, const float idx2, const float idy2, const float idz2, float * __restrict__ vel) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int tid = ty * TX + tx; const int idx = tid % DX; const int idy = tid / DX; const int l_tx = idx + RADIUS; const int l_ty = idy + RADIUS; const int gx = blockIdx.x * blockDim.x; const int gy = blockIdx.y * blockDim.y; __shared__ float s_data[BY + 2 * RADIUS][BX + 2 * RADIUS]; float front[RADIUS][BY / DY][BX / DX]; float rear[RADIUS][BY / DY][BX / DX]; float cur[BY / DY][BX / DX]; float laplacian[BY / DY][BX / DX]; const int p_idx = (gy + l_ty) * stride_z + gx + l_tx; float * plane_input = wf_cur + p_idx; float * plane_output = wf_next + RADIUS * stride_y + p_idx; float * plane_vel = vel + RADIUS * stride_y + p_idx; int iz, ix, iy, i; #pragma unroll for(iy = 0; iy < RADIUS; iy++) { #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { front[iy][ix][iz] = plane_input[ix * DY * stride_z + iz * DX]; rear[iy][ix][iz] = plane_input[(RADIUS + 1) * stride_y + ix * DY * stride_z + iz * DX]; } } plane_input += stride_y; } #pragma unroll for(ix = 0; ix 
< BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { cur[ix][iz] = plane_input[ix * DY * stride_z + iz * DX]; } } #pragma unroll UNROLL for(iy = RADIUS; iy < ny - RADIUS; iy++) { #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { s_data[l_ty + ix * DY][l_tx + iz * DX] = cur[ix][iz]; } } // top & bottom if(idy < RADIUS) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { s_data[idy ][l_tx + iz * DX] = plane_input[- RADIUS * stride_z + iz * DX]; s_data[idy + BY + RADIUS][l_tx + iz * DX] = plane_input[ BY * stride_z + iz * DX]; } } // left & right if(idx < RADIUS) { #pragma unroll for(ix = 0; ix < BY / DY; ix++) { s_data[l_ty + ix * DY][idx ] = plane_input[ix * DY * stride_z - RADIUS]; s_data[l_ty + ix * DY][idx + BX + RADIUS] = plane_input[ix * DY * stride_z + BX]; } } __syncthreads(); float deriv; #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { float m_pos = coeff[0] * cur[ix][iz]; deriv = m_pos; #pragma unroll for(i = 1; i <= RADIUS; i++) { deriv += coeff[i] * (s_data[l_ty + ix * DY][l_tx + iz * DX + i] + s_data[l_ty + ix * DY][l_tx + iz * DX - i]); } laplacian[ix][iz] = deriv * idz2; deriv = m_pos; #pragma unroll for(i = 1; i <= RADIUS; i++) { deriv += coeff[i] * (s_data[l_ty + ix * DY + i][l_tx + iz * DX] + s_data[l_ty + ix * DY - i][l_tx + iz * DX]); } laplacian[ix][iz] += deriv * idx2; deriv = m_pos; #pragma unroll for(i = 1; i <= RADIUS; i++) { deriv += coeff[i] * (front[RADIUS - i][ix][iz] + rear[i - 1][ix][iz]); } laplacian[ix][iz] += deriv * idy2; // plane_output[ix * DY * stride_z + iz * DX] = - plane_output[ix * DY * stride_z + iz * DX] + cur[ix][iz] + cur[ix][iz] + plane_vel[ix * DY * stride_z + iz * DX] * laplacian[ix][iz]; plane_output[ix * DY * stride_z + iz * DX] = plane_vel[ix * DY * stride_z + iz * DX]; } } #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { #pragma unroll for(i = 0; i < RADIUS 
- 1; i++) { front[i][ix][iz] = front[i + 1][ix][iz]; } front[RADIUS - 1][ix][iz] = cur[ix][iz]; cur[ix][iz] = rear[0][ix][iz]; #pragma unroll for(i = 0; i < RADIUS - 1; i++) { rear[i][ix][iz] = rear[i + 1][ix][iz]; } rear[RADIUS - 1][ix][iz] = plane_input[(RADIUS + 1) * stride_y + ix * DY * stride_z + iz * DX]; } } plane_vel += stride_y; plane_input += stride_y; plane_output += stride_y; __syncthreads(); } } __device__ float wsinc(float x) { float sinc = 1.f; float wind = 0.f; float tmp = M_PI * x; if(x) sinc = __sinf(tmp) / tmp; if(x > -4.f && x < 4.f) wind = 0.5f * (1.f + __cosf(tmp / 4.f)); return sinc * wind; } template<size_t BX, size_t BY, size_t TX, size_t TY, size_t DX, size_t DY, size_t RADIUS> __global__ void inject_source_kernel_template( float * __restrict__ wf_cur, float wavelet, float sx, float sy, float sz, const int x1, const int x2, const int z1, const int z2, const int y1, const int y2, const int stride_y, const int stride_z) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int tid = ty * TX + tx; const int idx = tid % DX; const int idy = tid / DX; const int l_tx = idx + RADIUS; const int l_ty = idy + RADIUS; const int gx = blockIdx.x * blockDim.x + l_tx; const int gy = blockIdx.y * blockDim.y + l_ty; float * ptr_output = wf_cur + gy * stride_z + gx; int iy, ix, iz; const float km = 0.68f; #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { int x = gx + iz * DX; int z = gy + ix * DY; if(x >= x1 && x < x2 && z >= z1 && z < z2) { float xs = wsinc(km * (sx - x)); float zs = wsinc(km * (sz - z)); #pragma unroll for(iy = y1; iy < y2; iy++) { float ys = wsinc(km * (sy - iy)); ptr_output[iy * stride_y + ix * DY * stride_z + iz * DX] += wavelet * xs * ys * zs; // if(x == x1 && z == z1 && iy == y1) ptr_output[iy * stride_y + ix * DY * stride_z + iz * DX] += wavelet; } } } } } // 2.5d tiling + time blocking //template<size_t BX, size_t BY, size_t DX, size_t DY, > //__global___ static void 
fdtd3d_kernel_template() //{ // //} template<size_t BX, size_t BY, size_t BZ, size_t DX, size_t DY, size_t TX, size_t TY, size_t RADIUS> double fdtd3d_gpu_wrapper( const int nx, const int ny, const int nz, const int radius, const float dx, const float dy, const float dz, float * wf_cur, float * h_vel, const int nt, const float dt, const float * wav, const int waveletLength) { cout << DX << "\t" << DY << "\n"; assert(radius == RADIUS); Wavefield3d<float, float, BY, BZ, BX> d_wf_cur; d_wf_cur.allocate(nx, ny, nz, radius, false, nullptr, wf_cur); // initialize wavefield d_wf_cur.download(); Wavefield3d<float, float, BY, BZ, BX> d_wf_next; d_wf_next.allocate(nx, ny, nz, radius, false, nullptr, wf_cur); // initialize wavefiled d_wf_next.download(); Wavefield3d<float, float, BY, BZ, BX> d_vel; d_vel.allocate(nx, ny, nz, radius, false, nullptr, h_vel); d_vel.download(); long long int mpoints = (d_wf_cur.nxpad - 2 * d_wf_cur.radius) * (d_wf_cur.nypad - 2 * d_wf_cur.radius) * (d_wf_cur.nzpad - 2 * d_wf_cur.radius); cout << 1.0 * mpoints * nt / 1e6 << "Mpoints\n"; float gflops = (3.f * radius * (1 + 1 + 1) + 2.f + 4.f) * mpoints * nt / 1e9; cout << 1.0 * gflops << "GFLOP\n"; const int stride_y = d_wf_cur.nxpad * d_wf_cur.nzpad; const int stride_z = d_wf_cur.nzpad; const int worky = d_wf_cur.ny; const float idx2 = 1.f / (dx * dx); const float idy2 = 1.f / (dy * dy); const float idz2 = 1.f / (dz * dz); const float sx = (nx - 1) * dx * 0.5f / dx; const float sz = (nz - 1) * dz * 0.5f / dz; const float sy = (ny - 1) * dy * 0.5f / dy; const float wind = 0.05f; const int windx = (int)(wind / dx + 0.5f); const int windy = (int)(wind / dy + 0.5f); const int windz = (int)(wind / dz + 0.5f); const int x1 = sx - windx; const int x2 = sx + windx + 1; const int z1 = sz - windz; const int z2 = sz + windz + 1; const int y1 = sy - windy; const int y2 = sy + windy + 1; float memSize = 3.0 * 4.f * (d_wf_cur.nxpad * d_wf_cur.nypad * d_wf_cur.nzpad + d_wf_cur.padding) / (1024.0 * 1024.0 * 
1024.0); cout << memSize << "Gb\n"; hipMemcpyToSymbol(coeff, (void*)h_coeff, sizeof(float) * (radius + 1)); int grid_x = (d_wf_cur.nzpad - 2 * d_wf_cur.radius) / BX; int grid_y = (d_wf_cur.nxpad - 2 * d_wf_cur.radius) / BY; dim3 grids(grid_x, grid_y, 1); dim3 threads(BX, BY, 1); TimerGPU timer(0); // kernel execution int it = 0; for(; it <= nt - 2; it += 2) { // from wf_cur -> wf_next if(it % 10 == 0) fprintf(stdout, "INFO: %04d time steps of total time steps %04d\n", it, nt); hipLaunchKernelGGL(( fdtd3d_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS>), dim3(grids), dim3(threads), 0, 0, d_wf_next.d_wf + d_wf_next.padding, d_wf_cur.d_wf + d_wf_cur.padding, stride_y, stride_z, worky, idx2, idy2, idz2, d_vel.d_wf + d_vel.padding); if(it < waveletLength) { hipLaunchKernelGGL(( inject_source_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS>), dim3(grids), dim3(threads), 0, 0, d_wf_next.d_wf + d_wf_next.padding, wav[it], sx, sy, sz, x1, x2, z1, z2, y1, y2, stride_y, stride_z); } // from wf_next -> wf_cur if((it + 1) % 10 == 0) fprintf(stdout, "INFO: %04d time steps of total time steps %04d\n", it, nt); hipLaunchKernelGGL(( fdtd3d_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS>), dim3(grids), dim3(threads), 0, 0, d_wf_cur.d_wf + d_wf_cur.padding, d_wf_next.d_wf + d_wf_next.padding, stride_y, stride_z, worky, idx2, idy2, idz2, d_vel.d_wf + d_vel.padding); if(it + 1 < waveletLength) { hipLaunchKernelGGL(( inject_source_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS>), dim3(grids), dim3(threads), 0, 0, d_wf_cur.d_wf + d_wf_cur.padding, wav[it + 1], sx, sy, sz, x1, x2, z1, z2, y1, y2, stride_y, stride_z); } } for(; it < nt; it++) { if(it % 10 == 0) fprintf(stdout, "INFO: %04d time steps of total time steps %04d\n", it, nt); hipLaunchKernelGGL(( fdtd3d_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS>), dim3(grids), dim3(threads), 0, 0, d_wf_next.d_wf + d_wf_next.padding, d_wf_cur.d_wf + d_wf_cur.padding, stride_y, stride_z, worky, idx2, idy2, idz2, d_vel.d_wf + d_vel.padding); 
if(it < waveletLength) { hipLaunchKernelGGL(( inject_source_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS>), dim3(grids), dim3(threads), 0, 0, d_wf_next.d_wf + d_wf_next.padding, wav[it], sx, sy, sz, x1, x2, z1, z2, y1, y2, stride_y, stride_z); } } double gpuTime = timer.read(); fprintf(stdout, "INFO: elapsed time = %.2f ms.\n", gpuTime); #ifdef VERBOSITY fprintf(stdout, "INFO: performance = %.2f GFLOPS.\n", gflops / (1e-3 * gpuTime)); fprintf(stdout, "INFO: performance = %.2f Mpoints/s.\n", mpoints * nt / (1e6 * gpuTime * 1e-3)); #endif fflush(stdout); if((nt & 0x1) == 0) d_wf_cur.readback(); else d_wf_next.readback(); #ifdef DEBUG FILE * fp = fopen("/d0/data/zx/wf_gpu.dat", "wb"); fwrite(wf_cur, sizeof(float), nx * ny * nz, fp); fflush(fp); fclose(fp); #endif return gpuTime; } void set_source_wavelet(float * wav, const int nt, const float dt, const float f0) { for(int it = 0; it < nt; it++) { float ttime = it * dt; float temp = M_PI * M_PI * f0 * f0 * (ttime - 1.0 / f0) * (ttime - 1.0 / f0); wav[it] = (1.0 - 2.0 * temp) * expf(- temp); } } int main(int argc, char * argv[]) { if(argc != 9) { fprintf(stderr, "USAGE: nx ny nz dx dy dz nt dt\n"); return -1; } int nx = atoi(argv[1]); int ny = atoi(argv[2]); int nz = atoi(argv[3]); float dx = atof(argv[4]); float dy = atof(argv[5]); float dz = atof(argv[6]); int nt = atoi(argv[7]); float dt = atof(argv[8]); const int radius = 4; const float f0 = 20.f; const int waveletLength = 2 * (int)(1.f / f0 / dt + 0.5f); float * wav = (float*)malloc(sizeof(float) * waveletLength); set_source_wavelet(wav, waveletLength, dt, f0); for(int it = 0; it < waveletLength; it++) { wav[it] *= dt * dt / (dx * dy * dz); } #ifdef VERBOSITY fprintf(stdout, "INFO: finite difference forward modeling\n"); fprintf(stdout, "INFO: nx = %d, ny = %d, nz = %d\n", nx, ny, nz); fflush(stdout); #endif float * wf_cur = (float*)malloc(sizeof(float) * nx * ny * nz); float * wf_next = (float*)malloc(sizeof(float) * nx * ny * nz); float * h_vel = 
(float*)malloc(sizeof(float) * nx * ny * nz); float * h_wf_gpu = (float*)malloc(sizeof(float) * nx * ny * nz); memset(wf_cur, 0, sizeof(float) * nx * ny * nz); memset(wf_next, 0, sizeof(float) * nx * ny * nz); for(long long int i = 0; i < nx * ny * nz; i++) h_vel[i] = 4.f * 4.f * dt * dt; fdtd3d_gpu_wrapper<BLOCK_X, BLOCK_Y, BLOCK_Z, MEM_PATTERN_X, THREAD_X * THREAD_Y / MEM_PATTERN_X, THREAD_X, THREAD_Y, SCHEME_RADIUS>( nx, ny, nz, radius, dx, dy, dz, h_wf_gpu, h_vel, nt, dt, wav, waveletLength); free(wf_cur); wf_cur = nullptr; free(wf_next); wf_next = nullptr; free(h_vel); h_vel = nullptr; free(h_wf_gpu); h_wf_gpu = nullptr; free(wav); wav = nullptr; return EXIT_SUCCESS; }
50caeeb3959de467ffa6cfd2ddf3fd6839c7fae4.cu
#include <stdio.h> #include <math.h> #include <omp.h> #include <assert.h> #include <iostream> #include <complex> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cufft.h> #define INTEL_C_COMPILER #if defined(GNU_C_COMPILER) extern "C" { #include "cblas.h" #include "lapacke.h" #include "lapacke_mangling.h" } #elif defined(INTEL_C_COMPILER) #include "fftw3.h" #include "mkl.h" #endif #define VERBOSITY using std::cout; using std::complex; #define EXIT_SUCCESS 0 #define EXIT_FAILURE 1 #define nullptr NULL #define SCHEME_RADIUS 4 #define BLOCK_X 32 #define BLOCK_Y 8 #define BLOCK_Z 1 #define MEM_PATTERN_X 32 #define THREAD_X 32 #define THREAD_Y 8 #define UNROLL 8 #define DEBUG #define safeCall(err) __safeCall(err, __FILE__, __LINE__) inline void __safeCall(cudaError err, const char * file, const int line) { if(cudaSuccess != err) { fprintf(stderr, "ERROR: safeCall() Runtime API error in file <%s>, line %i : %s.\n", file , line, cudaGetErrorString(err)); exit(-1); } } class TimerGPU { public: cudaEvent_t start, stop; cudaStream_t stream; TimerGPU(cudaStream_t stream_ = 0) : stream(stream_) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, stream); } ~TimerGPU() { cudaEventDestroy(start); cudaEventDestroy(stop); } float read() { cudaEventRecord(stop, stream); cudaEventSynchronize(stop); float time; cudaEventElapsedTime(&time, start, stop); return time; } }; class TimerCPU { static const int bits = 10; public: long long beg_clock; float freq; TimerCPU(float freq_) : freq(freq_) { beg_clock = getTSC(bits); } long long getTSC(int bits) { #ifdef WIN32 return __rdtsc(); #else unsigned int low, high; __asm__(".byte 0x0f, 0x31" :"=a" (low), "=d" (high)); return ((long long)high<<(32 - bits)) | ((long long)low >> bits); #endif } float read() { long long end_clock = getTSC(bits); long long Kcycles = end_clock - beg_clock; float time = (float)(1 << bits) * Kcycles / freq / 1e3f; return time; } }; int iDivUp(int a, int b); int iDivDown(int a, int b); 
int iAlignUp(int a, int b); int iAlignDown(int a, int b); int iDivUp(int a, int b) { return (a % b == 0) ? (a / b) : (a / b + 1); } int iDivDown(int a, int b) { return a / b; } int iAlignUp(int a, int b) { return (a % b == 0) ? a : (a - a % b + b); } int iAlignDown(int a, int b) { return a - a % b; } __constant__ float coeff[SCHEME_RADIUS + 1]; static const float h_coeff[] = { -2.847222222, 1.6, -0.2, 0.02539682540, -0.001785714286}; template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> class Wavefield3d { public: Wavefield3d(); ~Wavefield3d(); void allocate(const int _nx, const int _ny, const int _nz, const int _radius, bool host, d_T * devmem, h_T * hostmem); double download(); double readback(); public: int nx, ny, nz; int nxpad, nypad, nzpad; int radius; int padding; h_T * h_wf; d_T * d_wf; bool h_internalAlloc; bool d_internalAlloc; }; #define ALIGNEDMENT_BITS 128 template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> void Wavefield3d<h_T, d_T, BX, BY, BZ>::allocate(const int _nx, const int _ny, const int _nz, const int _radius, bool host, d_T * devmem, h_T * hostmem) { nx = _nx; ny = _ny; nz = _nz; radius = _radius; nxpad = iAlignUp(nx - 2 * radius, BX) + 2 * radius; nzpad = iAlignUp(nz - 2 * radius, BZ) + 2 * radius; nypad = iAlignUp(ny - 2 * radius, BY) + 2 * radius; long long int volumeSize = ((long long int)nxpad) * nypad * nzpad; #ifdef VERBOSITY fprintf(stdout, "INFO: nx = %d, ny = %d, nz = %d.\n", nx, ny, nz); fprintf(stdout, "INFO: nxpad = %d, nypad = %d, nzpad = %d.\n", nxpad, nypad, nzpad); fflush(stdout); #endif padding = ALIGNEDMENT_BITS / sizeof(h_T) - radius; volumeSize += padding; h_wf = hostmem; d_wf = devmem; if(d_wf == nullptr) { if(volumeSize < 0) { fprintf(stderr, "ERROR: cannot allocate %lld bytes from device global memory, file: %s, line: %d\n", volumeSize * sizeof(d_T), __FILE__, __LINE__); d_wf = nullptr; exit(EXIT_FAILURE); } safeCall(cudaMalloc((void**)&d_wf, volumeSize * sizeof(d_T))); 
safeCall(cudaMemset(d_wf, 0, volumeSize * sizeof(d_T))); if(d_wf == nullptr) { fprintf(stderr, "ERROR: cannot allocate %lld bytes from device global memory, file: %s, line: %d\n", volumeSize * sizeof(d_T), __FILE__, __LINE__); } d_internalAlloc = true; } if(host && h_wf == nullptr) { long long int h_volumeSize = nx * ny * nz; if(h_volumeSize < 0) { fprintf(stderr, "ERROR: cannot allocate %lld bytes from host memory, file: %s, line: %d\n", h_volumeSize * sizeof(h_T), __FILE__, __LINE__); h_wf = nullptr; exit(EXIT_FAILURE); } h_wf = (float*)malloc(sizeof(h_T) * h_volumeSize); memset(h_wf, 0, h_volumeSize * sizeof(h_T)); h_internalAlloc = true; } } template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> Wavefield3d<h_T, d_T, BX, BY, BZ>::Wavefield3d() : nx(0), ny(0), nz(0), radius(0), h_wf(nullptr), d_wf(nullptr), h_internalAlloc(false), d_internalAlloc(false) { } template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> Wavefield3d<h_T, d_T, BX, BY, BZ>::~Wavefield3d() { if(h_internalAlloc && h_wf != nullptr) free(h_wf); h_wf = nullptr; if(d_internalAlloc && d_wf != nullptr) safeCall(cudaFree(d_wf)); d_wf = nullptr; } template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> double Wavefield3d<h_T, d_T, BX, BY, BZ>::download() { TimerGPU timer(0); int stride_z = sizeof(d_T) * nzpad; int d_stride_y = nxpad * nzpad; int h_stride_y = nx * nz; if(h_wf != nullptr && d_wf != nullptr) { h_T * h_ptr = h_wf; d_T * d_ptr = d_wf + padding; for(int iy = 0; iy < ny; iy++) { safeCall(cudaMemcpy2D(d_ptr, stride_z, h_ptr, sizeof(h_T) * nz, sizeof(h_T) * nz, nx, cudaMemcpyHostToDevice)); h_ptr += h_stride_y; d_ptr += d_stride_y; } } double gpuTime = timer.read(); #ifdef VERBOSITY fprintf(stdout, "INFO: download time = %.2fms\n", gpuTime); fflush(stdout); #endif return gpuTime; } template<typename h_T, typename d_T, size_t BX, size_t BY, size_t BZ> double Wavefield3d<h_T, d_T, BX, BY, BZ>::readback() { TimerGPU timer(0); int stride_z = sizeof(d_T) 
* nzpad; int d_stride_y = nxpad * nzpad; int h_stride_y = nx * nz; if(h_wf != nullptr && d_wf != nullptr) { h_T * h_ptr = h_wf; d_T * d_ptr = d_wf + padding; for(int iy = 0; iy < ny; iy++) { safeCall(cudaMemcpy2D(h_ptr, sizeof(h_T) * nz, d_ptr, stride_z, sizeof(d_T) * nz, nx, cudaMemcpyDeviceToHost)); h_ptr += h_stride_y; d_ptr += d_stride_y; } } double gpuTime = timer.read(); #ifdef VERBOSITY fprintf(stdout, "INFO: download time = %.2fms\n", gpuTime); fflush(stdout); #endif return gpuTime; } // 2.5d tiling // DX >= RADIUS // DY >= RADIUS template<size_t BX, size_t BY, size_t TX, size_t TY, size_t DX, size_t DY, size_t RADIUS> __global__ static void fdtd3d_kernel_template(float * __restrict__ wf_next, float * __restrict__ wf_cur, const int stride_y, const int stride_z, const int ny, const float idx2, const float idy2, const float idz2, float * __restrict__ vel) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int tid = ty * TX + tx; const int idx = tid % DX; const int idy = tid / DX; const int l_tx = idx + RADIUS; const int l_ty = idy + RADIUS; const int gx = blockIdx.x * blockDim.x; const int gy = blockIdx.y * blockDim.y; __shared__ float s_data[BY + 2 * RADIUS][BX + 2 * RADIUS]; float front[RADIUS][BY / DY][BX / DX]; float rear[RADIUS][BY / DY][BX / DX]; float cur[BY / DY][BX / DX]; float laplacian[BY / DY][BX / DX]; const int p_idx = (gy + l_ty) * stride_z + gx + l_tx; float * plane_input = wf_cur + p_idx; float * plane_output = wf_next + RADIUS * stride_y + p_idx; float * plane_vel = vel + RADIUS * stride_y + p_idx; int iz, ix, iy, i; #pragma unroll for(iy = 0; iy < RADIUS; iy++) { #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { front[iy][ix][iz] = plane_input[ix * DY * stride_z + iz * DX]; rear[iy][ix][iz] = plane_input[(RADIUS + 1) * stride_y + ix * DY * stride_z + iz * DX]; } } plane_input += stride_y; } #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; 
iz++) { cur[ix][iz] = plane_input[ix * DY * stride_z + iz * DX]; } } #pragma unroll UNROLL for(iy = RADIUS; iy < ny - RADIUS; iy++) { #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { s_data[l_ty + ix * DY][l_tx + iz * DX] = cur[ix][iz]; } } // top & bottom if(idy < RADIUS) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { s_data[idy ][l_tx + iz * DX] = plane_input[- RADIUS * stride_z + iz * DX]; s_data[idy + BY + RADIUS][l_tx + iz * DX] = plane_input[ BY * stride_z + iz * DX]; } } // left & right if(idx < RADIUS) { #pragma unroll for(ix = 0; ix < BY / DY; ix++) { s_data[l_ty + ix * DY][idx ] = plane_input[ix * DY * stride_z - RADIUS]; s_data[l_ty + ix * DY][idx + BX + RADIUS] = plane_input[ix * DY * stride_z + BX]; } } __syncthreads(); float deriv; #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { float m_pos = coeff[0] * cur[ix][iz]; deriv = m_pos; #pragma unroll for(i = 1; i <= RADIUS; i++) { deriv += coeff[i] * (s_data[l_ty + ix * DY][l_tx + iz * DX + i] + s_data[l_ty + ix * DY][l_tx + iz * DX - i]); } laplacian[ix][iz] = deriv * idz2; deriv = m_pos; #pragma unroll for(i = 1; i <= RADIUS; i++) { deriv += coeff[i] * (s_data[l_ty + ix * DY + i][l_tx + iz * DX] + s_data[l_ty + ix * DY - i][l_tx + iz * DX]); } laplacian[ix][iz] += deriv * idx2; deriv = m_pos; #pragma unroll for(i = 1; i <= RADIUS; i++) { deriv += coeff[i] * (front[RADIUS - i][ix][iz] + rear[i - 1][ix][iz]); } laplacian[ix][iz] += deriv * idy2; // plane_output[ix * DY * stride_z + iz * DX] = - plane_output[ix * DY * stride_z + iz * DX] + cur[ix][iz] + cur[ix][iz] + plane_vel[ix * DY * stride_z + iz * DX] * laplacian[ix][iz]; plane_output[ix * DY * stride_z + iz * DX] = plane_vel[ix * DY * stride_z + iz * DX]; } } #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { #pragma unroll for(i = 0; i < RADIUS - 1; i++) { front[i][ix][iz] = front[i + 1][ix][iz]; } 
front[RADIUS - 1][ix][iz] = cur[ix][iz]; cur[ix][iz] = rear[0][ix][iz]; #pragma unroll for(i = 0; i < RADIUS - 1; i++) { rear[i][ix][iz] = rear[i + 1][ix][iz]; } rear[RADIUS - 1][ix][iz] = plane_input[(RADIUS + 1) * stride_y + ix * DY * stride_z + iz * DX]; } } plane_vel += stride_y; plane_input += stride_y; plane_output += stride_y; __syncthreads(); } } __device__ float wsinc(float x) { float sinc = 1.f; float wind = 0.f; float tmp = M_PI * x; if(x) sinc = __sinf(tmp) / tmp; if(x > -4.f && x < 4.f) wind = 0.5f * (1.f + __cosf(tmp / 4.f)); return sinc * wind; } template<size_t BX, size_t BY, size_t TX, size_t TY, size_t DX, size_t DY, size_t RADIUS> __global__ void inject_source_kernel_template( float * __restrict__ wf_cur, float wavelet, float sx, float sy, float sz, const int x1, const int x2, const int z1, const int z2, const int y1, const int y2, const int stride_y, const int stride_z) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int tid = ty * TX + tx; const int idx = tid % DX; const int idy = tid / DX; const int l_tx = idx + RADIUS; const int l_ty = idy + RADIUS; const int gx = blockIdx.x * blockDim.x + l_tx; const int gy = blockIdx.y * blockDim.y + l_ty; float * ptr_output = wf_cur + gy * stride_z + gx; int iy, ix, iz; const float km = 0.68f; #pragma unroll for(ix = 0; ix < BY / DY; ix++) { #pragma unroll for(iz = 0; iz < BX / DX; iz++) { int x = gx + iz * DX; int z = gy + ix * DY; if(x >= x1 && x < x2 && z >= z1 && z < z2) { float xs = wsinc(km * (sx - x)); float zs = wsinc(km * (sz - z)); #pragma unroll for(iy = y1; iy < y2; iy++) { float ys = wsinc(km * (sy - iy)); ptr_output[iy * stride_y + ix * DY * stride_z + iz * DX] += wavelet * xs * ys * zs; // if(x == x1 && z == z1 && iy == y1) ptr_output[iy * stride_y + ix * DY * stride_z + iz * DX] += wavelet; } } } } } // 2.5d tiling + time blocking //template<size_t BX, size_t BY, size_t DX, size_t DY, > //__global___ static void fdtd3d_kernel_template() //{ // //} template<size_t BX, size_t 
BY, size_t BZ, size_t DX, size_t DY, size_t TX, size_t TY, size_t RADIUS> double fdtd3d_gpu_wrapper( const int nx, const int ny, const int nz, const int radius, const float dx, const float dy, const float dz, float * wf_cur, float * h_vel, const int nt, const float dt, const float * wav, const int waveletLength) { cout << DX << "\t" << DY << "\n"; assert(radius == RADIUS); Wavefield3d<float, float, BY, BZ, BX> d_wf_cur; d_wf_cur.allocate(nx, ny, nz, radius, false, nullptr, wf_cur); // initialize wavefield d_wf_cur.download(); Wavefield3d<float, float, BY, BZ, BX> d_wf_next; d_wf_next.allocate(nx, ny, nz, radius, false, nullptr, wf_cur); // initialize wavefiled d_wf_next.download(); Wavefield3d<float, float, BY, BZ, BX> d_vel; d_vel.allocate(nx, ny, nz, radius, false, nullptr, h_vel); d_vel.download(); long long int mpoints = (d_wf_cur.nxpad - 2 * d_wf_cur.radius) * (d_wf_cur.nypad - 2 * d_wf_cur.radius) * (d_wf_cur.nzpad - 2 * d_wf_cur.radius); cout << 1.0 * mpoints * nt / 1e6 << "Mpoints\n"; float gflops = (3.f * radius * (1 + 1 + 1) + 2.f + 4.f) * mpoints * nt / 1e9; cout << 1.0 * gflops << "GFLOP\n"; const int stride_y = d_wf_cur.nxpad * d_wf_cur.nzpad; const int stride_z = d_wf_cur.nzpad; const int worky = d_wf_cur.ny; const float idx2 = 1.f / (dx * dx); const float idy2 = 1.f / (dy * dy); const float idz2 = 1.f / (dz * dz); const float sx = (nx - 1) * dx * 0.5f / dx; const float sz = (nz - 1) * dz * 0.5f / dz; const float sy = (ny - 1) * dy * 0.5f / dy; const float wind = 0.05f; const int windx = (int)(wind / dx + 0.5f); const int windy = (int)(wind / dy + 0.5f); const int windz = (int)(wind / dz + 0.5f); const int x1 = sx - windx; const int x2 = sx + windx + 1; const int z1 = sz - windz; const int z2 = sz + windz + 1; const int y1 = sy - windy; const int y2 = sy + windy + 1; float memSize = 3.0 * 4.f * (d_wf_cur.nxpad * d_wf_cur.nypad * d_wf_cur.nzpad + d_wf_cur.padding) / (1024.0 * 1024.0 * 1024.0); cout << memSize << "Gb\n"; cudaMemcpyToSymbol(coeff, 
(void*)h_coeff, sizeof(float) * (radius + 1)); int grid_x = (d_wf_cur.nzpad - 2 * d_wf_cur.radius) / BX; int grid_y = (d_wf_cur.nxpad - 2 * d_wf_cur.radius) / BY; dim3 grids(grid_x, grid_y, 1); dim3 threads(BX, BY, 1); TimerGPU timer(0); // kernel execution int it = 0; for(; it <= nt - 2; it += 2) { // from wf_cur -> wf_next if(it % 10 == 0) fprintf(stdout, "INFO: %04d time steps of total time steps %04d\n", it, nt); fdtd3d_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS><<<grids, threads>>>( d_wf_next.d_wf + d_wf_next.padding, d_wf_cur.d_wf + d_wf_cur.padding, stride_y, stride_z, worky, idx2, idy2, idz2, d_vel.d_wf + d_vel.padding); if(it < waveletLength) { inject_source_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS><<<grids, threads>>>( d_wf_next.d_wf + d_wf_next.padding, wav[it], sx, sy, sz, x1, x2, z1, z2, y1, y2, stride_y, stride_z); } // from wf_next -> wf_cur if((it + 1) % 10 == 0) fprintf(stdout, "INFO: %04d time steps of total time steps %04d\n", it, nt); fdtd3d_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS><<<grids, threads>>>( d_wf_cur.d_wf + d_wf_cur.padding, d_wf_next.d_wf + d_wf_next.padding, stride_y, stride_z, worky, idx2, idy2, idz2, d_vel.d_wf + d_vel.padding); if(it + 1 < waveletLength) { inject_source_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS><<<grids, threads>>>( d_wf_cur.d_wf + d_wf_cur.padding, wav[it + 1], sx, sy, sz, x1, x2, z1, z2, y1, y2, stride_y, stride_z); } } for(; it < nt; it++) { if(it % 10 == 0) fprintf(stdout, "INFO: %04d time steps of total time steps %04d\n", it, nt); fdtd3d_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS><<<grids, threads>>>( d_wf_next.d_wf + d_wf_next.padding, d_wf_cur.d_wf + d_wf_cur.padding, stride_y, stride_z, worky, idx2, idy2, idz2, d_vel.d_wf + d_vel.padding); if(it < waveletLength) { inject_source_kernel_template<BX, BY, TX, TY, DX, DY, RADIUS><<<grids, threads>>>( d_wf_next.d_wf + d_wf_next.padding, wav[it], sx, sy, sz, x1, x2, z1, z2, y1, y2, stride_y, stride_z); } } double gpuTime = 
timer.read(); fprintf(stdout, "INFO: elapsed time = %.2f ms.\n", gpuTime); #ifdef VERBOSITY fprintf(stdout, "INFO: performance = %.2f GFLOPS.\n", gflops / (1e-3 * gpuTime)); fprintf(stdout, "INFO: performance = %.2f Mpoints/s.\n", mpoints * nt / (1e6 * gpuTime * 1e-3)); #endif fflush(stdout); if((nt & 0x1) == 0) d_wf_cur.readback(); else d_wf_next.readback(); #ifdef DEBUG FILE * fp = fopen("/d0/data/zx/wf_gpu.dat", "wb"); fwrite(wf_cur, sizeof(float), nx * ny * nz, fp); fflush(fp); fclose(fp); #endif return gpuTime; } void set_source_wavelet(float * wav, const int nt, const float dt, const float f0) { for(int it = 0; it < nt; it++) { float ttime = it * dt; float temp = M_PI * M_PI * f0 * f0 * (ttime - 1.0 / f0) * (ttime - 1.0 / f0); wav[it] = (1.0 - 2.0 * temp) * expf(- temp); } } int main(int argc, char * argv[]) { if(argc != 9) { fprintf(stderr, "USAGE: nx ny nz dx dy dz nt dt\n"); return -1; } int nx = atoi(argv[1]); int ny = atoi(argv[2]); int nz = atoi(argv[3]); float dx = atof(argv[4]); float dy = atof(argv[5]); float dz = atof(argv[6]); int nt = atoi(argv[7]); float dt = atof(argv[8]); const int radius = 4; const float f0 = 20.f; const int waveletLength = 2 * (int)(1.f / f0 / dt + 0.5f); float * wav = (float*)malloc(sizeof(float) * waveletLength); set_source_wavelet(wav, waveletLength, dt, f0); for(int it = 0; it < waveletLength; it++) { wav[it] *= dt * dt / (dx * dy * dz); } #ifdef VERBOSITY fprintf(stdout, "INFO: finite difference forward modeling\n"); fprintf(stdout, "INFO: nx = %d, ny = %d, nz = %d\n", nx, ny, nz); fflush(stdout); #endif float * wf_cur = (float*)malloc(sizeof(float) * nx * ny * nz); float * wf_next = (float*)malloc(sizeof(float) * nx * ny * nz); float * h_vel = (float*)malloc(sizeof(float) * nx * ny * nz); float * h_wf_gpu = (float*)malloc(sizeof(float) * nx * ny * nz); memset(wf_cur, 0, sizeof(float) * nx * ny * nz); memset(wf_next, 0, sizeof(float) * nx * ny * nz); for(long long int i = 0; i < nx * ny * nz; i++) h_vel[i] = 4.f * 4.f * 
dt * dt; fdtd3d_gpu_wrapper<BLOCK_X, BLOCK_Y, BLOCK_Z, MEM_PATTERN_X, THREAD_X * THREAD_Y / MEM_PATTERN_X, THREAD_X, THREAD_Y, SCHEME_RADIUS>( nx, ny, nz, radius, dx, dy, dz, h_wf_gpu, h_vel, nt, dt, wav, waveletLength); free(wf_cur); wf_cur = nullptr; free(wf_next); wf_next = nullptr; free(h_vel); h_vel = nullptr; free(h_wf_gpu); h_wf_gpu = nullptr; free(wav); wav = nullptr; return EXIT_SUCCESS; }
1b577a02dd03fc46b5d31578300efe0e3f3fee6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_helpers.hpp" #include "kernels_hip.cuh" namespace emida { // size // +-------+-------+-------+ // +--+--+--+ +--+--+--+ // +--+--+--+ //blockDim2 2 2 2 2 2 2 2 2 //block: 1 2 3 4 5 6 7 8 9 template<typename T> __global__ void sum(const T* data, T * maxes, size_t size) { T* sdata = shared_memory_proxy<T>(); //number of blocks we need to process one picture size_t one_pic_blocks = div_up(size, blockDim.x); size_t pic_num = blockIdx.x / one_pic_blocks; size_t pic_block = blockIdx.x % one_pic_blocks; //if this is the last block that processes one picture(chunk) //and this thread would process sth out of the picture, just load zero size_t i = pic_num * size + pic_block * blockDim.x + threadIdx.x; if (blockIdx.x % one_pic_blocks == one_pic_blocks - 1 && size % blockDim.x != 0 && threadIdx.x >= size % blockDim.x) { sdata[threadIdx.x] = 0; } else sdata[threadIdx.x] = data[i]; if (threadIdx.x == 0 && pic_block == 0) maxes[pic_num] = 0; __syncthreads(); for (size_t s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { sdata[threadIdx.x] += sdata[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) atomicAdd(&maxes[pic_num], sdata[0]); } template<typename T> void run_sum(const T* data, T * sums, size_t size, size_t batch_size) { size_t block_size = 1024; size_t one_pic_blocks = div_up(size, block_size); size_t grid_size = one_pic_blocks * batch_size; hipLaunchKernelGGL(( sum<T>) , dim3(grid_size), dim3(block_size), block_size * sizeof(T), 0, data, sums, size); } template void run_sum<double>(const double* data, double * sums, size_t size, size_t batch_size); template<typename T, typename RES> __global__ void sum(const T* data, RES* sums, const size2_t* begins, size2_t src_size, size2_t slice_size) { RES* sdata = shared_memory_proxy<RES>(); //number of blocks we need to process one picture size_t one_pic_blocks = 
div_up(slice_size.area(), blockDim.x); size_t slice_num = blockIdx.x / one_pic_blocks; size_t slice_block = blockIdx.x % one_pic_blocks; //if this is the last block that processes one picture(chunk) //and this thread would process sth out of the picture, just load zero if (blockIdx.x % one_pic_blocks == one_pic_blocks - 1 && slice_size.area() % blockDim.x != 0 && threadIdx.x >= slice_size.area() % blockDim.x) { sdata[threadIdx.x] = 0; } else { size_t slice_i = slice_block * blockDim.x + threadIdx.x; size2_t slice_pos = { slice_i % slice_size.x, slice_i / slice_size.x }; size2_t src_pos = begins[slice_num] + slice_pos; sdata[threadIdx.x] = data[src_pos.pos(src_size.x)]; } if (threadIdx.x == 0 && slice_block == 0) sums[slice_num] = 0; __syncthreads(); for (size_t s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { sdata[threadIdx.x] += sdata[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) atomicAdd(&sums[slice_num], sdata[0]); } template<typename T, typename RES> void run_sum(const T* data, RES* sums, const size2_t * begins, size2_t src_size, size2_t slice_size, size_t batch_size) { size_t block_size = 1024; size_t one_pic_blocks = div_up(slice_size.area(), block_size); size_t grid_size = one_pic_blocks * batch_size; hipLaunchKernelGGL(( sum<T, RES>) , dim3(grid_size), dim3(block_size), block_size * sizeof(RES) , 0, data, sums, begins, src_size, slice_size); } template void run_sum<double, double>(const double* data, double* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size_t batch_size); //template void run_sum<uint16_t, uint32_t>(const uint16_t* data, uint32_t* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size_t batch_size); template void run_sum<uint16_t, double>(const uint16_t* data, double* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size_t batch_size); template void run_sum<uint16_t, float>(const uint16_t* data, float* sums, const size2_t* begins, size2_t src_size, size2_t 
slice_size, size_t batch_size); }
1b577a02dd03fc46b5d31578300efe0e3f3fee6e.cu
#include "cuda.h" #include "cuda_runtime.h" #include "device_helpers.hpp" #include "kernels.cuh" namespace emida { // size // +-------+-------+-------+ // +--+--+--+ +--+--+--+ // +--+--+--+ //blockDim2 2 2 2 2 2 2 2 2 //block: 1 2 3 4 5 6 7 8 9 template<typename T> __global__ void sum(const T* data, T * maxes, size_t size) { T* sdata = shared_memory_proxy<T>(); //number of blocks we need to process one picture size_t one_pic_blocks = div_up(size, blockDim.x); size_t pic_num = blockIdx.x / one_pic_blocks; size_t pic_block = blockIdx.x % one_pic_blocks; //if this is the last block that processes one picture(chunk) //and this thread would process sth out of the picture, just load zero size_t i = pic_num * size + pic_block * blockDim.x + threadIdx.x; if (blockIdx.x % one_pic_blocks == one_pic_blocks - 1 && size % blockDim.x != 0 && threadIdx.x >= size % blockDim.x) { sdata[threadIdx.x] = 0; } else sdata[threadIdx.x] = data[i]; if (threadIdx.x == 0 && pic_block == 0) maxes[pic_num] = 0; __syncthreads(); for (size_t s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { sdata[threadIdx.x] += sdata[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) atomicAdd(&maxes[pic_num], sdata[0]); } template<typename T> void run_sum(const T* data, T * sums, size_t size, size_t batch_size) { size_t block_size = 1024; size_t one_pic_blocks = div_up(size, block_size); size_t grid_size = one_pic_blocks * batch_size; sum<T> <<<grid_size, block_size, block_size * sizeof(T)>>> (data, sums, size); } template void run_sum<double>(const double* data, double * sums, size_t size, size_t batch_size); template<typename T, typename RES> __global__ void sum(const T* data, RES* sums, const size2_t* begins, size2_t src_size, size2_t slice_size) { RES* sdata = shared_memory_proxy<RES>(); //number of blocks we need to process one picture size_t one_pic_blocks = div_up(slice_size.area(), blockDim.x); size_t slice_num = blockIdx.x / one_pic_blocks; size_t slice_block = blockIdx.x % 
one_pic_blocks; //if this is the last block that processes one picture(chunk) //and this thread would process sth out of the picture, just load zero if (blockIdx.x % one_pic_blocks == one_pic_blocks - 1 && slice_size.area() % blockDim.x != 0 && threadIdx.x >= slice_size.area() % blockDim.x) { sdata[threadIdx.x] = 0; } else { size_t slice_i = slice_block * blockDim.x + threadIdx.x; size2_t slice_pos = { slice_i % slice_size.x, slice_i / slice_size.x }; size2_t src_pos = begins[slice_num] + slice_pos; sdata[threadIdx.x] = data[src_pos.pos(src_size.x)]; } if (threadIdx.x == 0 && slice_block == 0) sums[slice_num] = 0; __syncthreads(); for (size_t s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { sdata[threadIdx.x] += sdata[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) atomicAdd(&sums[slice_num], sdata[0]); } template<typename T, typename RES> void run_sum(const T* data, RES* sums, const size2_t * begins, size2_t src_size, size2_t slice_size, size_t batch_size) { size_t block_size = 1024; size_t one_pic_blocks = div_up(slice_size.area(), block_size); size_t grid_size = one_pic_blocks * batch_size; sum<T, RES> <<<grid_size, block_size, block_size * sizeof(RES) >>> (data, sums, begins, src_size, slice_size); } template void run_sum<double, double>(const double* data, double* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size_t batch_size); //template void run_sum<uint16_t, uint32_t>(const uint16_t* data, uint32_t* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size_t batch_size); template void run_sum<uint16_t, double>(const uint16_t* data, double* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size_t batch_size); template void run_sum<uint16_t, float>(const uint16_t* data, float* sums, const size2_t* begins, size2_t src_size, size2_t slice_size, size_t batch_size); }
9475de18c09aabfd7fd915027e4be316d8864c30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void dotmul3_kernel(const real * __restrict__ a, const real * __restrict__ b, real * __restrict__ c) { int xi = threadIdx.x + blockIdx.x * blockDim.x + NGHOST; int yi = threadIdx.y + blockIdx.y * blockDim.y + NGHOST; int idx1 = vfidx(xi, yi, NGHOST); int idx2 = vfidx(xi, yi, NGHOST, 1); int idx3 = vfidx(xi, yi, NGHOST, 2); int stride = vfzstride(); for (int i = 0; i < NZ; i++) { c[idx1 + i*stride] = a[idx1 + i*stride] * b[idx1 + i*stride] + a[idx2 + i*stride] * b[idx2 + i*stride] + a[idx3 + i*stride] * b[idx3 + i*stride]; } } void dotmul3(vf3dgpu &a, vf3dgpu &b, vf3dgpu &c) { hipLaunchKernelGGL(( dotmul3_kernel), dim3(x_wide.nblocks), dim3(x_wide.nthreads), 0, 0, a.mem(), b.mem(), c.mem()); } __global__ void add2_kernel(real c1, const real *t1, real c2, const real *t2, real *res) { int xi = threadIdx.x + blockIdx.x * blockDim.x + NGHOST; int yi = threadIdx.y + blockIdx.y * blockDim.y + NGHOST; int idx = vfidx(xi, yi, NGHOST); int stride = vfzstride(); for (int i = 0; i < NZ; i++) { res[idx] = c1 * t1[idx] + c2 * t2[idx]; idx += stride; } } __global__ void add3_kernel(real c1, const real *t1, real c2, const real *t2, real c3, const real *t3, real *res) { int xi = threadIdx.x + blockIdx.x * blockDim.x + NGHOST; int yi = threadIdx.y + blockIdx.y * blockDim.y + NGHOST; int idx = vfidx(xi, yi, NGHOST); int stride = vfzstride(); for (int i = 0; i < NZ; i++) { res[idx] = c1 * t1[idx] + c2 * t2[idx] + c3 * t3[idx]; idx += stride; } } void add2(real c1, vf3dgpu &a, real c2, vf3dgpu &b, vf3dgpu &c) { for (int vi = 0; vi < a.varcount(); vi++) { hipLaunchKernelGGL(( add2_kernel), dim3(x_wide.nblocks), dim3(x_wide.nthreads), 0, 0, c1, vfvar(a.mem(), vi), c2, vfvar(b.mem(), vi), vfvar(c.mem(), vi)); } } void add3(real c1, vf3dgpu &a, real c2, vf3dgpu &b, real c3, vf3dgpu &c, vf3dgpu &d) { for (int vi = 0; vi < a.varcount(); vi++) { hipLaunchKernelGGL(( add3_kernel), 
dim3(x_wide.nblocks), dim3(x_wide.nthreads), 0, 0, c1, vfvar(a.mem(), vi), c2, vfvar(b.mem(), vi), c3, vfvar(c.mem(), vi), vfvar(d.mem(), vi)); } }
9475de18c09aabfd7fd915027e4be316d8864c30.cu
__global__ void dotmul3_kernel(const real * __restrict__ a, const real * __restrict__ b, real * __restrict__ c) { int xi = threadIdx.x + blockIdx.x * blockDim.x + NGHOST; int yi = threadIdx.y + blockIdx.y * blockDim.y + NGHOST; int idx1 = vfidx(xi, yi, NGHOST); int idx2 = vfidx(xi, yi, NGHOST, 1); int idx3 = vfidx(xi, yi, NGHOST, 2); int stride = vfzstride(); for (int i = 0; i < NZ; i++) { c[idx1 + i*stride] = a[idx1 + i*stride] * b[idx1 + i*stride] + a[idx2 + i*stride] * b[idx2 + i*stride] + a[idx3 + i*stride] * b[idx3 + i*stride]; } } void dotmul3(vf3dgpu &a, vf3dgpu &b, vf3dgpu &c) { dotmul3_kernel<<<x_wide.nblocks, x_wide.nthreads>>>(a.mem(), b.mem(), c.mem()); } __global__ void add2_kernel(real c1, const real *t1, real c2, const real *t2, real *res) { int xi = threadIdx.x + blockIdx.x * blockDim.x + NGHOST; int yi = threadIdx.y + blockIdx.y * blockDim.y + NGHOST; int idx = vfidx(xi, yi, NGHOST); int stride = vfzstride(); for (int i = 0; i < NZ; i++) { res[idx] = c1 * t1[idx] + c2 * t2[idx]; idx += stride; } } __global__ void add3_kernel(real c1, const real *t1, real c2, const real *t2, real c3, const real *t3, real *res) { int xi = threadIdx.x + blockIdx.x * blockDim.x + NGHOST; int yi = threadIdx.y + blockIdx.y * blockDim.y + NGHOST; int idx = vfidx(xi, yi, NGHOST); int stride = vfzstride(); for (int i = 0; i < NZ; i++) { res[idx] = c1 * t1[idx] + c2 * t2[idx] + c3 * t3[idx]; idx += stride; } } void add2(real c1, vf3dgpu &a, real c2, vf3dgpu &b, vf3dgpu &c) { for (int vi = 0; vi < a.varcount(); vi++) { add2_kernel<<<x_wide.nblocks, x_wide.nthreads>>>(c1, vfvar(a.mem(), vi), c2, vfvar(b.mem(), vi), vfvar(c.mem(), vi)); } } void add3(real c1, vf3dgpu &a, real c2, vf3dgpu &b, real c3, vf3dgpu &c, vf3dgpu &d) { for (int vi = 0; vi < a.varcount(); vi++) { add3_kernel<<<x_wide.nblocks, x_wide.nthreads>>>(c1, vfvar(a.mem(), vi), c2, vfvar(b.mem(), vi), c3, vfvar(c.mem(), vi), vfvar(d.mem(), vi)); } }
4e9c535affae028cb38e77ef14304d2431af4e3b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ real test_trig_init(real x, real y, real z) { return sin(x) + cos(y) - 2 * sin(z); } __device__ real sin_x_init(real x, real y, real z) { return sin(x); } __device__ real zero_init(real x, real y, real z) { return 0.0; } template<real(*fun)(real, real, real)> __global__ void init_field_kernel(real *f, const real mod, const struct space_args s) { int xi = threadIdx.x + blockDim.x * blockIdx.x; int yi = threadIdx.y + blockDim.y * blockIdx.y; int idx = vfidx(xi + NGHOST, yi + NGHOST, NGHOST); int stride = vfzstride(); for (int zi = 0; zi < NZ; zi++) f[idx + zi * stride] = mod * fun(s.x_0 + xi * s.dx, s.y_0 + yi * s.dy, s.z_0 + zi * s.dz); } void init_field(vf3dgpu vf, init_fun_t fun, real mod) { space_args s = {dx, dy, dz, x_0, y_0, z_0}; for (int vi = 0; vi < vf.varcount(); vi++) { switch (fun) { case TEST_TRIG_INIT: hipLaunchKernelGGL(( init_field_kernel<test_trig_init>) , dim3(x_wide.nblocks), dim3(x_wide.nthreads), 0, 0, vfvar(vf.mem(), vi), mod, s); break; case SIN_X_INIT: hipLaunchKernelGGL(( init_field_kernel<sin_x_init>) , dim3(x_wide.nblocks), dim3(x_wide.nthreads), 0, 0, vfvar(vf.mem(), vi), mod, s); break; case ZERO_INIT: hipLaunchKernelGGL(( init_field_kernel<zero_init>) , dim3(x_wide.nblocks), dim3(x_wide.nthreads), 0, 0, vfvar(vf.mem(), vi), mod, s); break; } } }
4e9c535affae028cb38e77ef14304d2431af4e3b.cu
__device__ real test_trig_init(real x, real y, real z) { return sin(x) + cos(y) - 2 * sin(z); } __device__ real sin_x_init(real x, real y, real z) { return sin(x); } __device__ real zero_init(real x, real y, real z) { return 0.0; } template<real(*fun)(real, real, real)> __global__ void init_field_kernel(real *f, const real mod, const struct space_args s) { int xi = threadIdx.x + blockDim.x * blockIdx.x; int yi = threadIdx.y + blockDim.y * blockIdx.y; int idx = vfidx(xi + NGHOST, yi + NGHOST, NGHOST); int stride = vfzstride(); for (int zi = 0; zi < NZ; zi++) f[idx + zi * stride] = mod * fun(s.x_0 + xi * s.dx, s.y_0 + yi * s.dy, s.z_0 + zi * s.dz); } void init_field(vf3dgpu vf, init_fun_t fun, real mod) { space_args s = {dx, dy, dz, x_0, y_0, z_0}; for (int vi = 0; vi < vf.varcount(); vi++) { switch (fun) { case TEST_TRIG_INIT: init_field_kernel<test_trig_init> <<<x_wide.nblocks, x_wide.nthreads>>>( vfvar(vf.mem(), vi), mod, s); break; case SIN_X_INIT: init_field_kernel<sin_x_init> <<<x_wide.nblocks, x_wide.nthreads>>>( vfvar(vf.mem(), vi), mod, s); break; case ZERO_INIT: init_field_kernel<zero_init> <<<x_wide.nblocks, x_wide.nthreads>>>( vfvar(vf.mem(), vi), mod, s); break; } } }
3f0165a85de60c4c45fda0457ab0d7568d9c9e8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; }
3f0165a85de60c4c45fda0457ab0d7568d9c9e8f.cu
#include "includes.h" __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; }
f1a783a0a6b05363bcf20acd1ba3abaa43bc9fa8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Basic CUDA file for testing compiler flags. 
*/ __device__ int inner() { return -1; } __global__ void test() { inner(); } int main() { hipLaunchKernelGGL(( test), dim3(1),dim3(1), 0, 0, ); return 0; }
f1a783a0a6b05363bcf20acd1ba3abaa43bc9fa8.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Basic CUDA file for testing compiler flags. */ __device__ int inner() { return -1; } __global__ void test() { inner(); } int main() { test<<<1,1>>>(); return 0; }
8218660c7ce186cbc36f2f5d696efc75009f6110.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // The MIT License (MIT) // // Copyright (c) 2016 Northeastern University // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <iostream> #include "include/kmeans_gpu.h" #include "include/gpu_util.h" // Hack to cope with Clion #include "../../include/gpu_util.h" #include "../../../../../../../../usr/local/cuda/include/hip/driver_types.h" #include "../../include/kmeans_gpu.h" #include "../../include/kernel_types.h" #include <stdio.h> #include <stdlib.h> static inline int nextPowerOfTwo(int n) { n--; n = n >> 1 | n; n = n >> 2 | n; n = n >> 4 | n; n = n >> 8 | n; n = n >> 16 | n; // n = n >> 32 | n; // For 64-bit ints return ++n; } __host__ __device__ inline static float euclid_dist_2(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *clusters, // [numCoords][numClusters] int objectId, int clusterId) { int i; float ans = 0.0; for (i = 0; i < numCoords; i++) { ans += (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]) * (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]); } return (ans); } __global__ static void find_nearest_cluster(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; // The type chosen for membershipChanged must be large enough to support // reductions! There are blockDim.x elements, one for each thread in the // block. See numThreadsPerClusterBlock in cuda_kmeans(). 
unsigned char *membershipChanged = (unsigned char *) sharedMemory; float *clusters = deviceClusters; membershipChanged[threadIdx.x] = 0; int objectId = blockDim.x * blockIdx.x + threadIdx.x; if (objectId < numObjs) { int index, i; float dist, min_dist; /* find the cluster id that has min distance to object */ index = 0; min_dist = euclid_dist_2(numCoords, numObjs, numClusters, objects, clusters, objectId, 0); for (i = 1; i < numClusters; i++) { dist = euclid_dist_2(numCoords, numObjs, numClusters, objects, clusters, objectId, i); /* no need square root */ if (dist < min_dist) { /* find the min and its array index */ min_dist = dist; index = i; } } if (membership[objectId] != index) { membershipChanged[threadIdx.x] = 1; } /* assign the membership to object objectId */ membership[objectId] = index; __syncthreads(); // For membershipChanged[] // blockDim.x *must* be a power of two! for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { membershipChanged[threadIdx.x] += membershipChanged[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { intermediates[blockIdx.x] = membershipChanged[0]; } } } __global__ static void compute_delta(int *deviceIntermediates, int numIntermediates, // The actual number of intermediates int numIntermediates2) // The next power of two { // The number of elements in this array should be equal to // numIntermediates2, the number of threads launched. It *must* be a power // of two! extern __shared__ unsigned int intermediates[]; // Copy global intermediate values into shared memory. intermediates[threadIdx.x] = (threadIdx.x < numIntermediates) ? deviceIntermediates[threadIdx.x] : 0; __syncthreads(); // numIntermediates2 *must* be a power of two! 
for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { intermediates[threadIdx.x] += intermediates[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { deviceIntermediates[0] = intermediates[0]; } } /*----< cuda_kmeans() >-------------------------------------------------------*/ // // ---------------------------------------- // DATA LAYOUT // // objects [numObjs][numCoords] // clusters [numClusters][numCoords] // dimObjects [numCoords][numObjs] // dimClusters [numCoords][numClusters] // newClusters [numCoords][numClusters] // deviceObjects [numCoords][numObjs] // deviceClusters [numCoords][numClusters] // ---------------------------------------- // /* return an array of cluster centers of size [numClusters][numCoords] */ float **Nice::cuda_kmeans(float **objects, /* in: [numObjs][numCoords] */ int numCoords, /* no. features */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, index, loop = 0; int *newClusterSize; /* [numClusters]: no. 
objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ float **dimObjects; float **clusters; /* out: [numClusters][numCoords] */ float **dimClusters; float **newClusters; /* [numCoords][numClusters] */ float *deviceObjects; float *deviceClusters; int *deviceMembership; int *deviceIntermediates; // Copy objects given in [numObjs][numCoords] layout to new // [numCoords][numObjs] layout malloc2D(dimObjects, numCoords, numObjs, float); for (i = 0; i < numCoords; i++) { for (j = 0; j < numObjs; j++) { dimObjects[i][j] = objects[j][i]; } } /* pick first numClusters elements of objects[] as initial cluster centers*/ malloc2D(dimClusters, numCoords, numClusters, float); for (i = 0; i < numCoords; i++) { for (j = 0; j < numClusters; j++) { dimClusters[i][j] = dimObjects[i][j]; } } /* initialize membership[] */ for (i = 0; i < numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int *) calloc(numClusters, sizeof(int)); assert(newClusterSize != NULL); malloc2D(newClusters, numCoords, numClusters, float); memset(newClusters[0], 0, numCoords * numClusters * sizeof(float)); // To support reduction, numThreadsPerClusterBlock *must* be a power of // two, and it *must* be no larger than the number of bits that will // fit into an unsigned char, the type used to keep track of membership // changes in the kernel. 
const unsigned int numThreadsPerClusterBlock = 128; const unsigned int numClusterBlocks = (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock; const unsigned int clusterBlockSharedDataSize = numThreadsPerClusterBlock * sizeof(unsigned char); const unsigned int numReductionThreads = nextPowerOfTwo(numClusterBlocks); const unsigned int reductionBlockSharedDataSize = numReductionThreads * sizeof(unsigned int); checkCuda(hipMalloc(&deviceObjects, numObjs * numCoords * sizeof(float))); checkCuda(hipMalloc(&deviceClusters, numClusters * numCoords * sizeof(float))); checkCuda(hipMalloc(&deviceMembership, numObjs * sizeof(int))); checkCuda(hipMalloc(&deviceIntermediates, numReductionThreads * sizeof(unsigned int))); checkCuda(hipMemcpy(deviceObjects, dimObjects[0], numObjs * numCoords * sizeof(float), hipMemcpyHostToDevice)); checkCuda(hipMemcpy(deviceMembership, membership, numObjs * sizeof(int), hipMemcpyHostToDevice)); do { checkCuda(hipMemcpy(deviceClusters, dimClusters[0], numClusters * numCoords * sizeof(float), hipMemcpyHostToDevice)); find_nearest_cluster << < numClusterBlocks, numThreadsPerClusterBlock, clusterBlockSharedDataSize >> > (numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceMembership, deviceIntermediates); hipDeviceSynchronize(); checkLastCudaError(); compute_delta << < 1, numReductionThreads, reductionBlockSharedDataSize >> > (deviceIntermediates, numClusterBlocks, numReductionThreads); hipDeviceSynchronize(); checkLastCudaError(); int d; checkCuda(hipMemcpy(&d, deviceIntermediates, sizeof(int), hipMemcpyDeviceToHost)); delta = (float) d; checkCuda(hipMemcpy(membership, deviceMembership, numObjs * sizeof(int), hipMemcpyDeviceToHost)); for (i = 0; i < numObjs; i++) { /* find the array index of nestest cluster center */ index = membership[i]; /* update new cluster centers : sum of objects located within */ newClusterSize[index]++; for (j = 0; j < numCoords; j++) newClusters[j][index] += objects[i][j]; } /* average 
the sum and replace old cluster centers with newClusters */ for (i = 0; i < numClusters; i++) { for (j = 0; j < numCoords; j++) { if (newClusterSize[i] > 0) dimClusters[j][i] = newClusters[j][i] / newClusterSize[i]; newClusters[j][i] = 0.0; /* set back to 0 */ } newClusterSize[i] = 0; /* set back to 0 */ } delta /= numObjs; } while (delta > threshold && loop++ < 500); *loop_iterations = loop + 1; /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ malloc2D(clusters, numClusters, numCoords, float); for (i = 0; i < numClusters; i++) { for (j = 0; j < numCoords; j++) { clusters[i][j] = dimClusters[j][i]; } } checkCuda(hipFree(deviceObjects)); checkCuda(hipFree(deviceClusters)); checkCuda(hipFree(deviceMembership)); checkCuda(hipFree(deviceIntermediates)); free(dimObjects[0]); free(dimObjects); free(dimClusters[0]); free(dimClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
8218660c7ce186cbc36f2f5d696efc75009f6110.cu
// The MIT License (MIT) // // Copyright (c) 2016 Northeastern University // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#include <iostream> #include "include/kmeans_gpu.h" #include "include/gpu_util.h" // Hack to cope with Clion #include "../../include/gpu_util.h" #include "../../../../../../../../usr/local/cuda/include/driver_types.h" #include "../../include/kmeans_gpu.h" #include "../../include/kernel_types.h" #include <stdio.h> #include <stdlib.h> static inline int nextPowerOfTwo(int n) { n--; n = n >> 1 | n; n = n >> 2 | n; n = n >> 4 | n; n = n >> 8 | n; n = n >> 16 | n; // n = n >> 32 | n; // For 64-bit ints return ++n; } __host__ __device__ inline static float euclid_dist_2(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *clusters, // [numCoords][numClusters] int objectId, int clusterId) { int i; float ans = 0.0; for (i = 0; i < numCoords; i++) { ans += (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]) * (objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId]); } return (ans); } __global__ static void find_nearest_cluster(int numCoords, int numObjs, int numClusters, float *objects, // [numCoords][numObjs] float *deviceClusters, // [numCoords][numClusters] int *membership, // [numObjs] int *intermediates) { extern __shared__ char sharedMemory[]; // The type chosen for membershipChanged must be large enough to support // reductions! There are blockDim.x elements, one for each thread in the // block. See numThreadsPerClusterBlock in cuda_kmeans(). 
unsigned char *membershipChanged = (unsigned char *) sharedMemory; float *clusters = deviceClusters; membershipChanged[threadIdx.x] = 0; int objectId = blockDim.x * blockIdx.x + threadIdx.x; if (objectId < numObjs) { int index, i; float dist, min_dist; /* find the cluster id that has min distance to object */ index = 0; min_dist = euclid_dist_2(numCoords, numObjs, numClusters, objects, clusters, objectId, 0); for (i = 1; i < numClusters; i++) { dist = euclid_dist_2(numCoords, numObjs, numClusters, objects, clusters, objectId, i); /* no need square root */ if (dist < min_dist) { /* find the min and its array index */ min_dist = dist; index = i; } } if (membership[objectId] != index) { membershipChanged[threadIdx.x] = 1; } /* assign the membership to object objectId */ membership[objectId] = index; __syncthreads(); // For membershipChanged[] // blockDim.x *must* be a power of two! for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { membershipChanged[threadIdx.x] += membershipChanged[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { intermediates[blockIdx.x] = membershipChanged[0]; } } } __global__ static void compute_delta(int *deviceIntermediates, int numIntermediates, // The actual number of intermediates int numIntermediates2) // The next power of two { // The number of elements in this array should be equal to // numIntermediates2, the number of threads launched. It *must* be a power // of two! extern __shared__ unsigned int intermediates[]; // Copy global intermediate values into shared memory. intermediates[threadIdx.x] = (threadIdx.x < numIntermediates) ? deviceIntermediates[threadIdx.x] : 0; __syncthreads(); // numIntermediates2 *must* be a power of two! 
for (unsigned int s = numIntermediates2 / 2; s > 0; s >>= 1) { if (threadIdx.x < s) { intermediates[threadIdx.x] += intermediates[threadIdx.x + s]; } __syncthreads(); } if (threadIdx.x == 0) { deviceIntermediates[0] = intermediates[0]; } } /*----< cuda_kmeans() >-------------------------------------------------------*/ // // ---------------------------------------- // DATA LAYOUT // // objects [numObjs][numCoords] // clusters [numClusters][numCoords] // dimObjects [numCoords][numObjs] // dimClusters [numCoords][numClusters] // newClusters [numCoords][numClusters] // deviceObjects [numCoords][numObjs] // deviceClusters [numCoords][numClusters] // ---------------------------------------- // /* return an array of cluster centers of size [numClusters][numCoords] */ float **Nice::cuda_kmeans(float **objects, /* in: [numObjs][numCoords] */ int numCoords, /* no. features */ int numObjs, /* no. objects */ int numClusters, /* no. clusters */ float threshold, /* % objects change membership */ int *membership, /* out: [numObjs] */ int *loop_iterations) { int i, j, index, loop = 0; int *newClusterSize; /* [numClusters]: no. 
objects assigned in each new cluster */ float delta; /* % of objects change their clusters */ float **dimObjects; float **clusters; /* out: [numClusters][numCoords] */ float **dimClusters; float **newClusters; /* [numCoords][numClusters] */ float *deviceObjects; float *deviceClusters; int *deviceMembership; int *deviceIntermediates; // Copy objects given in [numObjs][numCoords] layout to new // [numCoords][numObjs] layout malloc2D(dimObjects, numCoords, numObjs, float); for (i = 0; i < numCoords; i++) { for (j = 0; j < numObjs; j++) { dimObjects[i][j] = objects[j][i]; } } /* pick first numClusters elements of objects[] as initial cluster centers*/ malloc2D(dimClusters, numCoords, numClusters, float); for (i = 0; i < numCoords; i++) { for (j = 0; j < numClusters; j++) { dimClusters[i][j] = dimObjects[i][j]; } } /* initialize membership[] */ for (i = 0; i < numObjs; i++) membership[i] = -1; /* need to initialize newClusterSize and newClusters[0] to all 0 */ newClusterSize = (int *) calloc(numClusters, sizeof(int)); assert(newClusterSize != NULL); malloc2D(newClusters, numCoords, numClusters, float); memset(newClusters[0], 0, numCoords * numClusters * sizeof(float)); // To support reduction, numThreadsPerClusterBlock *must* be a power of // two, and it *must* be no larger than the number of bits that will // fit into an unsigned char, the type used to keep track of membership // changes in the kernel. 
const unsigned int numThreadsPerClusterBlock = 128; const unsigned int numClusterBlocks = (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock; const unsigned int clusterBlockSharedDataSize = numThreadsPerClusterBlock * sizeof(unsigned char); const unsigned int numReductionThreads = nextPowerOfTwo(numClusterBlocks); const unsigned int reductionBlockSharedDataSize = numReductionThreads * sizeof(unsigned int); checkCuda(cudaMalloc(&deviceObjects, numObjs * numCoords * sizeof(float))); checkCuda(cudaMalloc(&deviceClusters, numClusters * numCoords * sizeof(float))); checkCuda(cudaMalloc(&deviceMembership, numObjs * sizeof(int))); checkCuda(cudaMalloc(&deviceIntermediates, numReductionThreads * sizeof(unsigned int))); checkCuda(cudaMemcpy(deviceObjects, dimObjects[0], numObjs * numCoords * sizeof(float), cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(deviceMembership, membership, numObjs * sizeof(int), cudaMemcpyHostToDevice)); do { checkCuda(cudaMemcpy(deviceClusters, dimClusters[0], numClusters * numCoords * sizeof(float), cudaMemcpyHostToDevice)); find_nearest_cluster << < numClusterBlocks, numThreadsPerClusterBlock, clusterBlockSharedDataSize >> > (numCoords, numObjs, numClusters, deviceObjects, deviceClusters, deviceMembership, deviceIntermediates); cudaDeviceSynchronize(); checkLastCudaError(); compute_delta << < 1, numReductionThreads, reductionBlockSharedDataSize >> > (deviceIntermediates, numClusterBlocks, numReductionThreads); cudaDeviceSynchronize(); checkLastCudaError(); int d; checkCuda(cudaMemcpy(&d, deviceIntermediates, sizeof(int), cudaMemcpyDeviceToHost)); delta = (float) d; checkCuda(cudaMemcpy(membership, deviceMembership, numObjs * sizeof(int), cudaMemcpyDeviceToHost)); for (i = 0; i < numObjs; i++) { /* find the array index of nestest cluster center */ index = membership[i]; /* update new cluster centers : sum of objects located within */ newClusterSize[index]++; for (j = 0; j < numCoords; j++) newClusters[j][index] += 
objects[i][j]; } /* average the sum and replace old cluster centers with newClusters */ for (i = 0; i < numClusters; i++) { for (j = 0; j < numCoords; j++) { if (newClusterSize[i] > 0) dimClusters[j][i] = newClusters[j][i] / newClusterSize[i]; newClusters[j][i] = 0.0; /* set back to 0 */ } newClusterSize[i] = 0; /* set back to 0 */ } delta /= numObjs; } while (delta > threshold && loop++ < 500); *loop_iterations = loop + 1; /* allocate a 2D space for returning variable clusters[] (coordinates of cluster centers) */ malloc2D(clusters, numClusters, numCoords, float); for (i = 0; i < numClusters; i++) { for (j = 0; j < numCoords; j++) { clusters[i][j] = dimClusters[j][i]; } } checkCuda(cudaFree(deviceObjects)); checkCuda(cudaFree(deviceClusters)); checkCuda(cudaFree(deviceMembership)); checkCuda(cudaFree(deviceIntermediates)); free(dimObjects[0]); free(dimObjects); free(dimClusters[0]); free(dimClusters); free(newClusters[0]); free(newClusters); free(newClusterSize); return clusters; }
86888d3ecebfe6a6e33f905e0ed0e5ce9636646f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <vector> #include "dali/kernels/imgproc/sampler_test.h" namespace dali { namespace kernels { using Pixel = std::array<uint8_t, 3>; using PixelF = std::array<float, 3>; template <typename Out, DALIInterpType interp, int MaxChannels = 8, typename In> __global__ void RunSampler2D( Surface2D<Out> out, Sampler2D<interp, In> sampler, In border_value, float dx, float dy, float x0, float y0) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= out.size.x || y >= out.size.y) return; In border[3] = { border_value, border_value, border_value }; vec2 src = { x*dx + x0, y*dy + y0 }; Out tmp[MaxChannels]; sampler(tmp, src, border); for (int c = 0; c < out.channels; c++) out(x, y, c) = tmp[c]; } template <typename Out, DALIInterpType interp, int MaxChannels = 8, typename In> __global__ void RunSampler3D( Surface3D<Out> out, Sampler3D<interp, In> sampler, In border_value, float dx, float dy, float dz, float x0, float y0, float z0) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; if (x >= out.size.x || y >= out.size.y || z >= out.size.z) return; In border[3] = { border_value, border_value, 
border_value }; vec3 src = { x*dx + x0, y*dy + y0, z*dz + z0 }; Out tmp[MaxChannels]; sampler(tmp, src, border); for (int c = 0; c < out.channels; c++) out(x, y, z, c) = tmp[c]; } TEST(Sampler2D_GPU, NN) { SamplerTestData<uint8_t> sd; auto surf_cpu = sd.GetSurface2D(false); auto surf_gpu = sd.GetSurface2D(true); ASSERT_EQ(surf_cpu.channels, 3); ASSERT_EQ(surf_gpu.channels, 3); Sampler2D<DALI_INTERP_NN, uint8_t> sampler(surf_gpu); uint8_t border_value = 50; Pixel border = { border_value, border_value, border_value }; float dy = 0.125f; float dx = 0.125f; float x0 = -1; float y0 = -1; int w = (surf_cpu.size.x+2) / dx + 1; int h = (surf_cpu.size.y+2) / dy + 1; int c = surf_cpu.channels; auto out_mem = mm::alloc_raw_unique<uint8_t, mm::memory_kind::device>(w*h*c); Surface2D<uint8_t> out_surf = { out_mem.get(), { w, h }, c, { c, w*c }, 1 }; std::vector<uint8_t> out_mem_cpu(w*h*c); dim3 block(32, 32); dim3 grid((w+31)/32, (h+31)/32); hipLaunchKernelGGL(( RunSampler2D), dim3(grid), dim3(block), 0, 0, out_surf, sampler, border_value, dx, dy, x0, y0); Surface2D<uint8_t> out_cpu = { out_mem_cpu.data(), { w, h }, c, { c, w*c }, 1 }; CUDA_CALL(hipMemcpy(out_cpu.data, out_surf.data, w*h*c, hipMemcpyDeviceToHost)); for (int oy = 0; oy < h; oy++) { float y = oy * dy + y0; int iy = floorf(y); for (int ox = 0; ox < w; ox++) { float x = ox * dx + x0; int ix = floorf(x); Pixel ref; if (ix < 0 || iy < 0 || ix >= surf_cpu.size.x || iy >= surf_cpu.size.y) { ref = border; } else { for (int c = 0; c < surf_cpu.channels; c++) ref[c] = surf_cpu(ix, iy, c); } Pixel pixel; for (int c = 0; c< surf_cpu.channels; c++) pixel[c] = out_cpu(ox, oy, c); EXPECT_EQ(pixel, ref) << " mismatch at " << vec2(x, y); } } } TEST(Sampler3D_GPU, NN) { SamplerTestData<uint8_t> sd; auto surf_cpu = sd.GetSurface3D(false); auto surf_gpu = sd.GetSurface3D(true); ASSERT_EQ(surf_cpu.channels, 3); ASSERT_EQ(surf_gpu.channels, 3); Sampler3D<DALI_INTERP_NN, uint8_t> sampler(surf_gpu); uint8_t border_value = 50; Pixel 
border = { border_value, border_value, border_value }; float dy = 0.125f; float dx = 0.125f; float dz = 0.25f; float x0 = -1; float y0 = -1; float z0 = -1; int w = (surf_cpu.size.x+2) / dx + 1; int h = (surf_cpu.size.y+2) / dy + 1; int d = (surf_cpu.size.z+2) / dz + 1; int c = surf_cpu.channels; auto out_mem = mm::alloc_raw_unique<uint8_t, mm::memory_kind::device>(w*h*d*c); Surface3D<uint8_t> out_surf = { out_mem.get(), { w, h, d }, c, { c, w*c, h*w*c }, 1 }; std::vector<uint8_t> out_mem_cpu(w*h*d*c); dim3 block(32, 32, 1); dim3 grid((w+31)/32, (h+31)/32, d); hipLaunchKernelGGL(( RunSampler3D), dim3(grid), dim3(block), 0, 0, out_surf, sampler, border_value, dx, dy, dz, x0, y0, z0); Surface3D<uint8_t> out_cpu = { out_mem_cpu.data(), { w, h, d }, c, { c, w*c, h*w*c }, 1 }; CUDA_CALL(hipMemcpy(out_cpu.data, out_surf.data, w*h*d*c, hipMemcpyDeviceToHost)); for (int oz = 0; oz < d; oz++) { float z = oz * dz + z0; int iz = floorf(z); for (int oy = 0; oy < h; oy++) { float y = oy * dy + y0; int iy = floorf(y); for (int ox = 0; ox < w; ox++) { float x = ox * dx + x0; int ix = floorf(x); Pixel ref; if (ix < 0 || iy < 0 || iz < 0 || ix >= surf_cpu.size.x || iy >= surf_cpu.size.y || iz >= surf_cpu.size.z) { ref = border; } else { for (int c = 0; c < surf_cpu.channels; c++) ref[c] = surf_cpu(ix, iy, iz, c); } Pixel pixel; for (int c = 0; c< surf_cpu.channels; c++) pixel[c] = out_cpu(ox, oy, oz, c); EXPECT_EQ(pixel, ref) << " mismatch at " << vec3(x, y, z); } } } } TEST(Sampler2D_GPU, Linear) { SamplerTestData<uint8_t> sd; auto surf_cpu = sd.GetSurface2D(false); auto surf_gpu = sd.GetSurface2D(true); ASSERT_EQ(surf_cpu.channels, 3); ASSERT_EQ(surf_gpu.channels, 3); Sampler2D<DALI_INTERP_LINEAR, uint8_t> sampler(surf_gpu); Sampler2D<DALI_INTERP_LINEAR, uint8_t> sampler_cpu(surf_cpu); uint8_t border_value = 50; Pixel border = { border_value, border_value, border_value }; float dy = 0.125f; float dx = 0.125f; float x0 = -1; float y0 = -1; int w = (surf_cpu.size.x+2) / dx + 1; int 
h = (surf_cpu.size.y+2) / dy + 1; int c = surf_cpu.channels; auto out_mem = mm::alloc_raw_unique<uint8_t, mm::memory_kind::device>(w*h*c); Surface2D<uint8_t> out_surf = { out_mem.get(), { w, h }, c, { c, w*c}, 1 }; std::vector<uint8_t> out_mem_cpu(w*h*c); dim3 block(32, 32); dim3 grid((w+31)/32, (h+31)/32); hipLaunchKernelGGL(( RunSampler2D), dim3(grid), dim3(block), 0, 0, out_surf, sampler, border_value, dx, dy, x0, y0); Surface2D<uint8_t> out_cpu = { out_mem_cpu.data(), { w, h }, c, { c, w*c }, 1 }; CUDA_CALL(hipMemcpy(out_cpu.data, out_surf.data, w*h*c, hipMemcpyDeviceToHost)); const float eps = 0.50000025f; // 0.5 + 4 ULP for (int oy = 0; oy < h; oy++) { float y = oy * dy + y0; for (int ox = 0; ox < w; ox++) { float x = ox * dx + x0; vec2 pos = { x, y }; PixelF ref; sampler_cpu(ref.data(), pos, border.data()); Pixel pixel; for (int c = 0; c< surf_cpu.channels; c++) { pixel[c] = out_cpu(ox, oy, c); EXPECT_NEAR(pixel[c], ref[c], eps) << " mismatch at " << pos; } } } } TEST(Sampler3D_GPU, Linear) { SamplerTestData<uint8_t> sd; auto surf_cpu = sd.GetSurface3D(false); auto surf_gpu = sd.GetSurface3D(true); ASSERT_EQ(surf_cpu.channels, 3); ASSERT_EQ(surf_gpu.channels, 3); Sampler3D<DALI_INTERP_LINEAR, uint8_t> sampler(surf_gpu); Sampler3D<DALI_INTERP_LINEAR, uint8_t> sampler_cpu(surf_cpu); uint8_t border_value = 50; Pixel border = { border_value, border_value, border_value }; float dy = 0.125f; float dx = 0.125f; float dz = 0.25f; float x0 = -1; float y0 = -1; float z0 = -1; int w = (surf_cpu.size.x+2) / dx + 1; int h = (surf_cpu.size.y+2) / dy + 1; int d = (surf_cpu.size.z+2) / dz + 1; int c = surf_cpu.channels; auto out_mem = mm::alloc_raw_unique<uint8_t, mm::memory_kind::device>(w*h*d*c); Surface3D<uint8_t> out_surf = { out_mem.get(), { w, h, d }, c, { c, w*c, h*w*c }, 1 }; std::vector<uint8_t> out_mem_cpu(w*h*d*c); dim3 block(32, 32, 1); dim3 grid((w+31)/32, (h+31)/32, d); hipLaunchKernelGGL(( RunSampler3D), dim3(grid), dim3(block), 0, 0, out_surf, sampler, 
border_value, dx, dy, dz, x0, y0, z0); Surface3D<uint8_t> out_cpu = { out_mem_cpu.data(), { w, h, d }, c, { c, w*c, h*w*c }, 1 }; CUDA_CALL(hipMemcpy(out_cpu.data, out_surf.data, w*h*d*c, hipMemcpyDeviceToHost)); const float eps = 0.50000025f; // 0.5 + 4 ULP for (int oz = 0; oz < d; oz++) { float z = oz * dz + z0; for (int oy = 0; oy < h; oy++) { float y = oy * dy + y0; for (int ox = 0; ox < w; ox++) { float x = ox * dx + x0; vec3 pos = { x, y, z }; PixelF ref; sampler_cpu(ref.data(), pos, border.data()); Pixel pixel; for (int c = 0; c< surf_cpu.channels; c++) { pixel[c] = out_cpu(ox, oy, oz, c); EXPECT_NEAR(pixel[c], ref[c], eps) << " mismatch at " << pos; } } } } } } // namespace kernels } // namespace dali
86888d3ecebfe6a6e33f905e0ed0e5ce9636646f.cu
// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <gtest/gtest.h> #include <vector> #include "dali/kernels/imgproc/sampler_test.h" namespace dali { namespace kernels { using Pixel = std::array<uint8_t, 3>; using PixelF = std::array<float, 3>; template <typename Out, DALIInterpType interp, int MaxChannels = 8, typename In> __global__ void RunSampler2D( Surface2D<Out> out, Sampler2D<interp, In> sampler, In border_value, float dx, float dy, float x0, float y0) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= out.size.x || y >= out.size.y) return; In border[3] = { border_value, border_value, border_value }; vec2 src = { x*dx + x0, y*dy + y0 }; Out tmp[MaxChannels]; sampler(tmp, src, border); for (int c = 0; c < out.channels; c++) out(x, y, c) = tmp[c]; } template <typename Out, DALIInterpType interp, int MaxChannels = 8, typename In> __global__ void RunSampler3D( Surface3D<Out> out, Sampler3D<interp, In> sampler, In border_value, float dx, float dy, float dz, float x0, float y0, float z0) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int z = threadIdx.z + blockIdx.z * blockDim.z; if (x >= out.size.x || y >= out.size.y || z >= out.size.z) return; In border[3] = { border_value, border_value, border_value }; vec3 src = { x*dx + x0, y*dy + y0, z*dz + z0 }; Out tmp[MaxChannels]; 
sampler(tmp, src, border); for (int c = 0; c < out.channels; c++) out(x, y, z, c) = tmp[c]; } TEST(Sampler2D_GPU, NN) { SamplerTestData<uint8_t> sd; auto surf_cpu = sd.GetSurface2D(false); auto surf_gpu = sd.GetSurface2D(true); ASSERT_EQ(surf_cpu.channels, 3); ASSERT_EQ(surf_gpu.channels, 3); Sampler2D<DALI_INTERP_NN, uint8_t> sampler(surf_gpu); uint8_t border_value = 50; Pixel border = { border_value, border_value, border_value }; float dy = 0.125f; float dx = 0.125f; float x0 = -1; float y0 = -1; int w = (surf_cpu.size.x+2) / dx + 1; int h = (surf_cpu.size.y+2) / dy + 1; int c = surf_cpu.channels; auto out_mem = mm::alloc_raw_unique<uint8_t, mm::memory_kind::device>(w*h*c); Surface2D<uint8_t> out_surf = { out_mem.get(), { w, h }, c, { c, w*c }, 1 }; std::vector<uint8_t> out_mem_cpu(w*h*c); dim3 block(32, 32); dim3 grid((w+31)/32, (h+31)/32); RunSampler2D<<<grid, block>>>(out_surf, sampler, border_value, dx, dy, x0, y0); Surface2D<uint8_t> out_cpu = { out_mem_cpu.data(), { w, h }, c, { c, w*c }, 1 }; CUDA_CALL(cudaMemcpy(out_cpu.data, out_surf.data, w*h*c, cudaMemcpyDeviceToHost)); for (int oy = 0; oy < h; oy++) { float y = oy * dy + y0; int iy = floorf(y); for (int ox = 0; ox < w; ox++) { float x = ox * dx + x0; int ix = floorf(x); Pixel ref; if (ix < 0 || iy < 0 || ix >= surf_cpu.size.x || iy >= surf_cpu.size.y) { ref = border; } else { for (int c = 0; c < surf_cpu.channels; c++) ref[c] = surf_cpu(ix, iy, c); } Pixel pixel; for (int c = 0; c< surf_cpu.channels; c++) pixel[c] = out_cpu(ox, oy, c); EXPECT_EQ(pixel, ref) << " mismatch at " << vec2(x, y); } } } TEST(Sampler3D_GPU, NN) { SamplerTestData<uint8_t> sd; auto surf_cpu = sd.GetSurface3D(false); auto surf_gpu = sd.GetSurface3D(true); ASSERT_EQ(surf_cpu.channels, 3); ASSERT_EQ(surf_gpu.channels, 3); Sampler3D<DALI_INTERP_NN, uint8_t> sampler(surf_gpu); uint8_t border_value = 50; Pixel border = { border_value, border_value, border_value }; float dy = 0.125f; float dx = 0.125f; float dz = 0.25f; float x0 = -1; 
float y0 = -1; float z0 = -1; int w = (surf_cpu.size.x+2) / dx + 1; int h = (surf_cpu.size.y+2) / dy + 1; int d = (surf_cpu.size.z+2) / dz + 1; int c = surf_cpu.channels; auto out_mem = mm::alloc_raw_unique<uint8_t, mm::memory_kind::device>(w*h*d*c); Surface3D<uint8_t> out_surf = { out_mem.get(), { w, h, d }, c, { c, w*c, h*w*c }, 1 }; std::vector<uint8_t> out_mem_cpu(w*h*d*c); dim3 block(32, 32, 1); dim3 grid((w+31)/32, (h+31)/32, d); RunSampler3D<<<grid, block>>>(out_surf, sampler, border_value, dx, dy, dz, x0, y0, z0); Surface3D<uint8_t> out_cpu = { out_mem_cpu.data(), { w, h, d }, c, { c, w*c, h*w*c }, 1 }; CUDA_CALL(cudaMemcpy(out_cpu.data, out_surf.data, w*h*d*c, cudaMemcpyDeviceToHost)); for (int oz = 0; oz < d; oz++) { float z = oz * dz + z0; int iz = floorf(z); for (int oy = 0; oy < h; oy++) { float y = oy * dy + y0; int iy = floorf(y); for (int ox = 0; ox < w; ox++) { float x = ox * dx + x0; int ix = floorf(x); Pixel ref; if (ix < 0 || iy < 0 || iz < 0 || ix >= surf_cpu.size.x || iy >= surf_cpu.size.y || iz >= surf_cpu.size.z) { ref = border; } else { for (int c = 0; c < surf_cpu.channels; c++) ref[c] = surf_cpu(ix, iy, iz, c); } Pixel pixel; for (int c = 0; c< surf_cpu.channels; c++) pixel[c] = out_cpu(ox, oy, oz, c); EXPECT_EQ(pixel, ref) << " mismatch at " << vec3(x, y, z); } } } } TEST(Sampler2D_GPU, Linear) { SamplerTestData<uint8_t> sd; auto surf_cpu = sd.GetSurface2D(false); auto surf_gpu = sd.GetSurface2D(true); ASSERT_EQ(surf_cpu.channels, 3); ASSERT_EQ(surf_gpu.channels, 3); Sampler2D<DALI_INTERP_LINEAR, uint8_t> sampler(surf_gpu); Sampler2D<DALI_INTERP_LINEAR, uint8_t> sampler_cpu(surf_cpu); uint8_t border_value = 50; Pixel border = { border_value, border_value, border_value }; float dy = 0.125f; float dx = 0.125f; float x0 = -1; float y0 = -1; int w = (surf_cpu.size.x+2) / dx + 1; int h = (surf_cpu.size.y+2) / dy + 1; int c = surf_cpu.channels; auto out_mem = mm::alloc_raw_unique<uint8_t, mm::memory_kind::device>(w*h*c); Surface2D<uint8_t> 
out_surf = { out_mem.get(), { w, h }, c, { c, w*c}, 1 }; std::vector<uint8_t> out_mem_cpu(w*h*c); dim3 block(32, 32); dim3 grid((w+31)/32, (h+31)/32); RunSampler2D<<<grid, block>>>(out_surf, sampler, border_value, dx, dy, x0, y0); Surface2D<uint8_t> out_cpu = { out_mem_cpu.data(), { w, h }, c, { c, w*c }, 1 }; CUDA_CALL(cudaMemcpy(out_cpu.data, out_surf.data, w*h*c, cudaMemcpyDeviceToHost)); const float eps = 0.50000025f; // 0.5 + 4 ULP for (int oy = 0; oy < h; oy++) { float y = oy * dy + y0; for (int ox = 0; ox < w; ox++) { float x = ox * dx + x0; vec2 pos = { x, y }; PixelF ref; sampler_cpu(ref.data(), pos, border.data()); Pixel pixel; for (int c = 0; c< surf_cpu.channels; c++) { pixel[c] = out_cpu(ox, oy, c); EXPECT_NEAR(pixel[c], ref[c], eps) << " mismatch at " << pos; } } } } TEST(Sampler3D_GPU, Linear) { SamplerTestData<uint8_t> sd; auto surf_cpu = sd.GetSurface3D(false); auto surf_gpu = sd.GetSurface3D(true); ASSERT_EQ(surf_cpu.channels, 3); ASSERT_EQ(surf_gpu.channels, 3); Sampler3D<DALI_INTERP_LINEAR, uint8_t> sampler(surf_gpu); Sampler3D<DALI_INTERP_LINEAR, uint8_t> sampler_cpu(surf_cpu); uint8_t border_value = 50; Pixel border = { border_value, border_value, border_value }; float dy = 0.125f; float dx = 0.125f; float dz = 0.25f; float x0 = -1; float y0 = -1; float z0 = -1; int w = (surf_cpu.size.x+2) / dx + 1; int h = (surf_cpu.size.y+2) / dy + 1; int d = (surf_cpu.size.z+2) / dz + 1; int c = surf_cpu.channels; auto out_mem = mm::alloc_raw_unique<uint8_t, mm::memory_kind::device>(w*h*d*c); Surface3D<uint8_t> out_surf = { out_mem.get(), { w, h, d }, c, { c, w*c, h*w*c }, 1 }; std::vector<uint8_t> out_mem_cpu(w*h*d*c); dim3 block(32, 32, 1); dim3 grid((w+31)/32, (h+31)/32, d); RunSampler3D<<<grid, block>>>(out_surf, sampler, border_value, dx, dy, dz, x0, y0, z0); Surface3D<uint8_t> out_cpu = { out_mem_cpu.data(), { w, h, d }, c, { c, w*c, h*w*c }, 1 }; CUDA_CALL(cudaMemcpy(out_cpu.data, out_surf.data, w*h*d*c, cudaMemcpyDeviceToHost)); const float eps = 
0.50000025f; // 0.5 + 4 ULP for (int oz = 0; oz < d; oz++) { float z = oz * dz + z0; for (int oy = 0; oy < h; oy++) { float y = oy * dy + y0; for (int ox = 0; ox < w; ox++) { float x = ox * dx + x0; vec3 pos = { x, y, z }; PixelF ref; sampler_cpu(ref.data(), pos, border.data()); Pixel pixel; for (int c = 0; c< surf_cpu.channels; c++) { pixel[c] = out_cpu(ox, oy, oz, c); EXPECT_NEAR(pixel[c], ref[c], eps) << " mismatch at " << pos; } } } } } } // namespace kernels } // namespace dali
b7f9414ef5408093641fba9b760b54ae8976015e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Oleh Semeniv (oleg.semeniv@gmail.com) // #include <system/op_boilerplate.h> #include <ops/declarable/helpers/updatersHelpers.h> #include <helpers/PointersManager.h> #include <math/platformmath.h> #include <math/templatemath.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ void adaMaxUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vinv, const Nd4jLong* invShapeInfo, const void* vinm, const Nd4jLong* inmShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vstV, const Nd4jLong* stvShapeInfo, void* vstM, const Nd4jLong* stmShapeInfo, const T lr, const T beta1, const T beta2, const T epsilon, const T iteration) { const auto grad = reinterpret_cast<const T*>(vx); const auto initU = reinterpret_cast<const T*>(vinv); const auto initM = reinterpret_cast<const T*>(vinm); auto up = reinterpret_cast<T*>(vz); auto stU = reinterpret_cast<T*>(vstV); auto stM = reinterpret_cast<T*>(vstM); __shared__ Nd4jLong xLen; __shared__ T beta1T, epsilonT; 
__shared__ bool bEWS, bOrdering, bXZsame, bXInUSame, bXStUSame, bXInMSame, bXStMSame; if (threadIdx.x == 0) { xLen = shape::length(xShapeInfo); beta1T = sd::math::nd4j_pow<T,T,T>(beta1, (iteration + 1) ); epsilonT = lr / (1.0 - beta1T); if (sd::math::nd4j_isnan(epsilonT) || 0 == epsilonT || sd::math::nd4j_isinf(epsilonT)) epsilonT = epsilon; bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) && 1 == shape::elementWiseStride(stmShapeInfo) && 1 == shape::elementWiseStride(inmShapeInfo) && 1 == shape::elementWiseStride(stvShapeInfo) && 1 == shape::elementWiseStride(invShapeInfo); bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(xShapeInfo) == shape::order(stmShapeInfo) && shape::order(xShapeInfo) == shape::order(inmShapeInfo) && shape::order(xShapeInfo) == shape::order(invShapeInfo) && shape::order(xShapeInfo) == shape::order(stvShapeInfo); bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); bXInUSame = shape::haveSameShapeAndStrides(xShapeInfo, invShapeInfo); bXStUSame = shape::haveSameShapeAndStrides(xShapeInfo, stvShapeInfo); bXInMSame = shape::haveSameShapeAndStrides(xShapeInfo, inmShapeInfo); bXStMSame = shape::haveSameShapeAndStrides(xShapeInfo, stmShapeInfo); } __syncthreads(); int coords[MAX_RANK]; for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) { auto xOffset = i, zOffset = i, initMOffset = i, initUOffset = i, stMOffset = i, stUOffset = i; if (!bEWS || !bOrdering) { shape::index2coords(i, xShapeInfo, coords); xOffset = shape::getOffset(xShapeInfo, coords); zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords); initUOffset = bXInUSame ? xOffset : shape::getOffset(invShapeInfo, coords); stUOffset = bXStUSame ? xOffset : shape::getOffset(stvShapeInfo, coords); initMOffset = bXInMSame ? xOffset : shape::getOffset(inmShapeInfo, coords); stMOffset = bXStMSame ? 
xOffset : shape::getOffset(stmShapeInfo, coords); } //m = B_1 * m + (1-B_1)*grad stM[stMOffset] = beta1 * initM[initMOffset] + grad[xOffset] * (1 - beta1); //u = max(B_2 * u, |grad|) stU[stUOffset] = sd::math::nd4j_max( (beta2* initU[initUOffset]), sd::math::nd4j_abs(grad[xOffset])) + 1e-32; up[zOffset] = (stM[stMOffset] * epsilonT) / stU[stUOffset]; } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void adaMaxUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vinv, const Nd4jLong* invShapeInfo, const void* vinm, const Nd4jLong* inmShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vstV, const Nd4jLong* stvShapeInfo, void* vstM, const Nd4jLong* stmShapeInfo, const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) { const T lr = static_cast<T>(dLr); const T beta1 = static_cast<T>(dBeta1); const T beta2 = static_cast<T>(dBeta2); const T epsilon = static_cast<T>(dEpsilon); const T iteration = static_cast<T>(nIteration); hipLaunchKernelGGL(( adaMaxUpdaterCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, * stream, vx, xShapeInfo, vinv, invShapeInfo, vinm, inmShapeInfo, vz, zShapeInfo, vstV, stvShapeInfo, vstM, stmShapeInfo, lr, beta1, beta2, epsilon, iteration); } /////////////////////////////////////////////////////////////////// void updaterAdaMax(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateU, const NDArray& initStateM, NDArray& update, NDArray& stateU, NDArray& stateM, const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) { PointersManager manager(context, "adaMaxUpdater"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; NDArray::prepareSpecialUse({ &update, &stateU, &stateM }, { 
&gradient, &initStateU, &initStateM }); BUILD_SINGLE_SELECTOR(gradient.dataType(), adaMaxUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(), initStateU.specialBuffer(), initStateU.specialShapeInfo(), initStateM.specialBuffer(), initStateM.specialShapeInfo(), update.specialBuffer(), update.specialShapeInfo(), stateU.specialBuffer(), stateU.specialShapeInfo(), stateM.specialBuffer(), stateM.specialShapeInfo(), dLr, dBeta1, dBeta2, dEpsilon, nIteration ), FLOAT_TYPES); NDArray::registerSpecialUse({ &update, &stateU, &stateM }, { &gradient, &initStateU, &initStateM }); manager.synchronize(); } } } }
b7f9414ef5408093641fba9b760b54ae8976015e.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Oleh Semeniv (oleg.semeniv@gmail.com) // #include <system/op_boilerplate.h> #include <ops/declarable/helpers/updatersHelpers.h> #include <helpers/PointersManager.h> #include <math/platformmath.h> #include <math/templatemath.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ void adaMaxUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vinv, const Nd4jLong* invShapeInfo, const void* vinm, const Nd4jLong* inmShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vstV, const Nd4jLong* stvShapeInfo, void* vstM, const Nd4jLong* stmShapeInfo, const T lr, const T beta1, const T beta2, const T epsilon, const T iteration) { const auto grad = reinterpret_cast<const T*>(vx); const auto initU = reinterpret_cast<const T*>(vinv); const auto initM = reinterpret_cast<const T*>(vinm); auto up = reinterpret_cast<T*>(vz); auto stU = reinterpret_cast<T*>(vstV); auto stM = reinterpret_cast<T*>(vstM); __shared__ Nd4jLong xLen; __shared__ T beta1T, epsilonT; __shared__ bool bEWS, bOrdering, bXZsame, bXInUSame, bXStUSame, bXInMSame, bXStMSame; if 
(threadIdx.x == 0) { xLen = shape::length(xShapeInfo); beta1T = sd::math::nd4j_pow<T,T,T>(beta1, (iteration + 1) ); epsilonT = lr / (1.0 - beta1T); if (sd::math::nd4j_isnan(epsilonT) || 0 == epsilonT || sd::math::nd4j_isinf(epsilonT)) epsilonT = epsilon; bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) && 1 == shape::elementWiseStride(stmShapeInfo) && 1 == shape::elementWiseStride(inmShapeInfo) && 1 == shape::elementWiseStride(stvShapeInfo) && 1 == shape::elementWiseStride(invShapeInfo); bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(xShapeInfo) == shape::order(stmShapeInfo) && shape::order(xShapeInfo) == shape::order(inmShapeInfo) && shape::order(xShapeInfo) == shape::order(invShapeInfo) && shape::order(xShapeInfo) == shape::order(stvShapeInfo); bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); bXInUSame = shape::haveSameShapeAndStrides(xShapeInfo, invShapeInfo); bXStUSame = shape::haveSameShapeAndStrides(xShapeInfo, stvShapeInfo); bXInMSame = shape::haveSameShapeAndStrides(xShapeInfo, inmShapeInfo); bXStMSame = shape::haveSameShapeAndStrides(xShapeInfo, stmShapeInfo); } __syncthreads(); int coords[MAX_RANK]; for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) { auto xOffset = i, zOffset = i, initMOffset = i, initUOffset = i, stMOffset = i, stUOffset = i; if (!bEWS || !bOrdering) { shape::index2coords(i, xShapeInfo, coords); xOffset = shape::getOffset(xShapeInfo, coords); zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords); initUOffset = bXInUSame ? xOffset : shape::getOffset(invShapeInfo, coords); stUOffset = bXStUSame ? xOffset : shape::getOffset(stvShapeInfo, coords); initMOffset = bXInMSame ? xOffset : shape::getOffset(inmShapeInfo, coords); stMOffset = bXStMSame ? 
xOffset : shape::getOffset(stmShapeInfo, coords); } //m = B_1 * m + (1-B_1)*grad stM[stMOffset] = beta1 * initM[initMOffset] + grad[xOffset] * (1 - beta1); //u = max(B_2 * u, |grad|) stU[stUOffset] = sd::math::nd4j_max( (beta2* initU[initUOffset]), sd::math::nd4j_abs(grad[xOffset])) + 1e-32; up[zOffset] = (stM[stMOffset] * epsilonT) / stU[stUOffset]; } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void adaMaxUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vinv, const Nd4jLong* invShapeInfo, const void* vinm, const Nd4jLong* inmShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vstV, const Nd4jLong* stvShapeInfo, void* vstM, const Nd4jLong* stmShapeInfo, const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) { const T lr = static_cast<T>(dLr); const T beta1 = static_cast<T>(dBeta1); const T beta2 = static_cast<T>(dBeta2); const T epsilon = static_cast<T>(dEpsilon); const T iteration = static_cast<T>(nIteration); adaMaxUpdaterCuda<T><<<blocksPerGrid, threadsPerBlock, 256, * stream>>>(vx, xShapeInfo, vinv, invShapeInfo, vinm, inmShapeInfo, vz, zShapeInfo, vstV, stvShapeInfo, vstM, stmShapeInfo, lr, beta1, beta2, epsilon, iteration); } /////////////////////////////////////////////////////////////////// void updaterAdaMax(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initStateU, const NDArray& initStateM, NDArray& update, NDArray& stateU, NDArray& stateM, const double dLr, const double dBeta1, const double dBeta2, const double dEpsilon, const int nIteration) { PointersManager manager(context, "adaMaxUpdater"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; NDArray::prepareSpecialUse({ &update, &stateU, &stateM }, { &gradient, &initStateU, 
&initStateM }); BUILD_SINGLE_SELECTOR(gradient.dataType(), adaMaxUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.specialBuffer(), gradient.specialShapeInfo(), initStateU.specialBuffer(), initStateU.specialShapeInfo(), initStateM.specialBuffer(), initStateM.specialShapeInfo(), update.specialBuffer(), update.specialShapeInfo(), stateU.specialBuffer(), stateU.specialShapeInfo(), stateM.specialBuffer(), stateM.specialShapeInfo(), dLr, dBeta1, dBeta2, dEpsilon, nIteration ), FLOAT_TYPES); NDArray::registerSpecialUse({ &update, &stateU, &stateM }, { &gradient, &initStateU, &initStateM }); manager.synchronize(); } } } }
6a56f080aadebacd1d2b5309259a5943e4173840.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void build_sequence_length_padding_offset(const int* sequence_length, const int batch_size, const int max_seq_len, int* valid_word_num, int* tmp_mask_offset) { // do cumulated sum int total_seq_len = 0; int cum_offset = 0; int index = 0; for(int i = 0; i < batch_size; i++) { const int seq_len = sequence_length[i]; for(int j = 0; j < seq_len; j++) { tmp_mask_offset[index] = cum_offset; index++; } cum_offset += max_seq_len - seq_len; total_seq_len += seq_len; } valid_word_num[0] = total_seq_len; }
6a56f080aadebacd1d2b5309259a5943e4173840.cu
#include "includes.h" __global__ void build_sequence_length_padding_offset(const int* sequence_length, const int batch_size, const int max_seq_len, int* valid_word_num, int* tmp_mask_offset) { // do cumulated sum int total_seq_len = 0; int cum_offset = 0; int index = 0; for(int i = 0; i < batch_size; i++) { const int seq_len = sequence_length[i]; for(int j = 0; j < seq_len; j++) { tmp_mask_offset[index] = cum_offset; index++; } cum_offset += max_seq_len - seq_len; total_seq_len += seq_len; } valid_word_num[0] = total_seq_len; }
1e6e24e63273c5411583405051afea8c0697a151.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> #define NUM_BLOCKS 4 #define BLOCK_WIDTH 4 __global__ void hello() { printf("Hello world! I'm thread %d in block %d\n", threadIdx.x, blockIdx.x); } int main(int argc,char **argv) { // launch the kernel hipLaunchKernelGGL(( hello), dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0, ); // force the printf()s to flush hipDeviceSynchronize(); printf("That's all!\n"); return 0; }
1e6e24e63273c5411583405051afea8c0697a151.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <device_functions.h> #define NUM_BLOCKS 4 #define BLOCK_WIDTH 4 __global__ void hello() { printf("Hello world! I'm thread %d in block %d\n", threadIdx.x, blockIdx.x); } int main(int argc,char **argv) { // launch the kernel hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>(); // force the printf()s to flush cudaDeviceSynchronize(); printf("That's all!\n"); return 0; }
b04fc25eb8981acf3416a01f856f73f880e0cebd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "shiftWalkers.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int dim = 1; const int nwl = 1; const float *xx = NULL; hipMalloc(&xx, XSIZE*YSIZE); const float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float *yy = NULL; hipMalloc(&yy, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( shiftWalkers), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,nwl,xx,x,yy); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( shiftWalkers), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,nwl,xx,x,yy); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( shiftWalkers), dim3(gridBlock),dim3(threadBlock), 0, 0, dim,nwl,xx,x,yy); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b04fc25eb8981acf3416a01f856f73f880e0cebd.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "shiftWalkers.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int dim = 1; const int nwl = 1; const float *xx = NULL; cudaMalloc(&xx, XSIZE*YSIZE); const float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float *yy = NULL; cudaMalloc(&yy, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); shiftWalkers<<<gridBlock,threadBlock>>>(dim,nwl,xx,x,yy); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { shiftWalkers<<<gridBlock,threadBlock>>>(dim,nwl,xx,x,yy); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { shiftWalkers<<<gridBlock,threadBlock>>>(dim,nwl,xx,x,yy); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
715d2ad002115ed4e04fbaf4fd028d6725a6ba24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <cmath> #include <algorithm> #include <memory> // std::unique_ptr #include <iostream> #include <thrust\execution_policy.h> #include <thrust\device_vector.h> #include <thrust\scan.h> #include <thrust\reduce.h> #include <thrust\count.h> #include "hip/hip_texture_types.h" // texture #include "texture_fetch_functions.h" // tex1Dfetch #include "helper_math.hpp" #include "dmc.hpp" template <typename T> void print_d_arr(const T* d_in, unsigned size, const std::string& prefix) { T* h_in = new T[size]; checkCudaErrors(hipMemcpy(h_in, d_in, sizeof(T) * size, hipMemcpyDeviceToHost)); std::cout << prefix << std::endl; for (unsigned i = 0; i < size; ++i) { std::cout << "[" << i << "] " << h_in[i] << std::endl; } delete[] h_in; } namespace dmc { #ifndef M_PI #define M_PI 3.14159265358979323846 #endif using namespace utils; // For each voxel config and an edge index, return the associated iso vertex in DMC. // This is LUT 1. Voxel with 3B config with its adjacent voxel being 2B config CANNOT use this LUT. 
const iso_vertex_m_type config_edge_lut1[NUM_CONFIGS][VOXEL_NUM_EDGES] = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0, 0, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 1, 0, 0, 1, 1, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0, 1, 1, 1, 1, 0xff, 0xff, 1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 
0, 0, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff }, { 1, 1, 0, 0, 0, 1, 0, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0, 0, 0xff, 0xff }, { 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 1, 0, 1, 1, 1, 1, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 1, 1, 0xff, 0, 1, 0xff, 0xff, 1, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0, 1, 1, 0, 0, 0xff, 0xff, 1, 0xff, 1, 0xff, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0, 0, 1, 1, 0xff, 1, 0xff, 1 }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0, 0xff, 0 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 
1, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0, 0xff }, { 0, 1, 1, 0, 0xff, 0, 1, 0, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 1, 1, 0, 1, 0xff, 1, 1, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 1, 0, 0xff, 1, 1, 0, 0 }, { 0xff, 1, 0xff, 1, 0xff, 0, 1, 0xff, 0, 0, 1, 1 }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0, 0 }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 1, 1 }, { 1, 0xff, 1, 0xff, 1, 0, 0xff, 0xff, 0, 0, 1, 1 }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0, 0 }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 1, 0, 0, 1, 1 }, { 1, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 1, 1, 0, 0 }, { 0, 1, 1, 0, 0, 2, 1, 3, 2, 2, 3, 3 }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 2, 1, 1, 2, 2 }, { 0xff, 1, 0xff, 1, 1, 0xff, 0xff, 0, 1, 1, 0, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0, 0, 1, 1 }, { 0, 0xff, 0xff, 0, 0, 2, 0xff, 1, 2, 2, 1, 1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 1, 1, 1, 0, 0xff, 0xff, 1, 0xff, 1, 0xff }, { 
0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0, 0, 0xff, 0, 0xff }, { 1, 0xff, 1, 0xff, 0, 1, 1, 0, 0, 0xff, 0, 0xff }, { 1, 0, 0, 1, 0xff, 0xff, 0, 1, 1, 0xff, 1, 0xff }, { 0xff, 1, 1, 0xff, 0, 0xff, 1, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0xff, 0xff, 0, 1, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 1, 1, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 1, 1, 1, 1, 1, 0, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 1, 0xff, 0xff, 1, 1 }, { 1, 2, 2, 1, 1, 0xff, 2, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0 }, { 0, 0, 1, 1, 0, 0xff, 0, 1, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0, 0, 1, 1, 1, 0, 0xff, 0xff, 1, 1 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0, 0, 0xff, 0xff, 1, 0, 1, 0xff, 1, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 1, 0, 1, 0xff, 1, 0xff }, { 0xff, 0xff, 1, 1, 0xff, 0, 0, 1, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 1, 1, 0, 0, 1, 0xff, 0xff, 1, 0xff, 1, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0 }, { 0, 0xff, 0xff, 0, 1, 0, 0xff, 0, 1, 0, 0, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0 }, { 0xff, 1, 0xff, 1, 0, 0xff, 0xff, 1, 0, 1, 1, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0, 1, 0, 0, 1, 1, 0 }, { 0, 0, 1, 1, 3, 0, 2, 1, 3, 2, 2, 3 }, { 1, 0xff, 1, 0xff, 0xff, 0xff, 0, 1, 1, 0, 0, 1 }, { 0xff, 0xff, 0, 0, 2, 0xff, 1, 0, 2, 1, 1, 2 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0 }, { 1, 0xff, 1, 0xff, 0, 1, 0xff, 0xff, 0, 1, 1, 0 }, { 0, 1, 1, 0, 0xff, 0xff, 0xff, 0xff, 0, 1, 1, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 0, 1 }, { 0xff, 1, 0xff, 1, 
0xff, 1, 0, 0xff, 1, 0, 0, 1 }, { 0, 0, 0xff, 0xff, 1, 0, 2, 0xff, 1, 2, 2, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0 }, { 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 0xff, 1, 0, 0, 1 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 1, 1, 0xff, 1, 1, 0, 1, 0xff, 0, 0, 0xff }, { 2, 2, 1, 1, 0xff, 2, 0, 1, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 1, 0xff, 0xff, 1, 1, 0xff }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 1, 0xff, 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0 }, { 1, 1, 0, 0, 1, 0xff, 0xff, 0, 0xff, 1, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0, 0xff, 1, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 1, 0, 0, 1, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 1 }, { 0xff, 1, 1, 0xff, 0, 0, 1, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 
0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 1, 1, 0xff, 0xff, 1, 0, 1, 1, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 1, 0, 0, 1, 1, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 1, 0, 0, 1, 1, 2, 0, 0xff, 2, 2, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0 }, { 1, 0xff, 0xff, 1, 0, 1, 1, 1, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 1, 0xff, 0, 0, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 2, 2, 0, 0, 1, 2, 0xff, 0, 1, 0xff, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 1, 0, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0, 1, 1, 0, 0xff, 0xff, 1, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 0xff, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 
0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }; // Number of iso vertices for DMC for each voxel config, this is LUT_1. // Voxel with 3B config with its adjacent voxel being 2B config CANNOT use this LUT. 
const uint8_t num_vertex_lut1[NUM_CONFIGS] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 2, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 2, 2, 4, 3, 2, 2, 3, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 2, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 2, 2, 4, 2, 3, 1, 2, 2, 2, 2, 3, 2, 2, 1, 1, 1, 1, 2, 3, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 3, 2, 1, 1, 2, 1, 1, 2, 1, 2, 1, 3, 1, 2, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 0 }; const iso_vertex_m_type config_edge_lut2[NUM_CONFIGS][VOXEL_NUM_EDGES] = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0xff, 
0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0, 1, 1, 0, 0xff, 0xff, 1, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 1, 0, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 1, 0, 0, 1, 1, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 1, 0xff, 0, 0, 1, 0xff, 0xff, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0, 1, 1, 1, 1, 0xff, 0xff, 1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff, 0xff }, { 1, 1, 0, 0, 0, 1, 0, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 1, 0, 0, 1, 1, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 1, 0, 1, 1, 1, 1, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 
0xff, 0xff, 0, 0xff, 0 }, { 0xff, 1, 1, 0xff, 0, 0, 1, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 1, 1, 0xff, 0, 1, 0xff, 0xff, 1, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0, 0xff, 1, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0, 1, 1, 0, 0, 0xff, 0xff, 1, 0xff, 1, 0xff, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0, 0, 1, 1, 0xff, 1, 0xff, 1 }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0, 0xff, 0 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0xff }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 1, 0xff, 0xff, 1, 1, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0, 1, 1, 0, 0xff, 0, 1, 0, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 1, 1, 0, 1, 0xff, 1, 1, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 0xff, 1, 0, 0, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0 }, { 0, 0, 0xff, 0xff, 0, 1, 0, 0xff, 1, 1, 0, 0 }, { 0xff, 1, 0xff, 1, 0xff, 0, 1, 0xff, 0, 0, 1, 1 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 0, 1 }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 1, 1 }, { 1, 0xff, 1, 0xff, 1, 0, 0xff, 0xff, 0, 0, 1, 
1 }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0, 0 }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 1, 0, 0, 1, 1 }, { 1, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 1, 1, 0, 0 }, { 0, 1, 1, 0, 0, 2, 1, 3, 2, 2, 3, 3 }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 2, 1, 1, 2, 2 }, { 0xff, 1, 0xff, 1, 1, 0xff, 0xff, 0, 1, 1, 0, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0, 0, 1, 1 }, { 0, 0xff, 0xff, 0, 0, 2, 0xff, 1, 2, 2, 1, 1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 1, 1, 1, 0, 0xff, 0xff, 1, 0xff, 1, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 1, 1, 0xff, 0, 0, 1, 0, 0xff, 0, 0xff }, { 1, 0xff, 1, 0xff, 0, 1, 1, 0, 0, 0xff, 0, 0xff }, { 1, 0, 0, 1, 0xff, 0xff, 0, 1, 1, 0xff, 1, 0xff }, { 0xff, 1, 1, 0xff, 0, 0xff, 1, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0xff, 0xff, 0, 1, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 1, 1, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 1, 1, 1, 1, 1, 0, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 1, 0xff, 0xff, 1, 1 }, { 1, 2, 2, 1, 1, 0xff, 
2, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 1, 0xff, 0xff, 1, 1 }, { 0, 0, 1, 1, 0, 0xff, 0, 1, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0xff, 0, 0, 1, 1, 1, 0, 0xff, 0xff, 1, 1 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 1, 1, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0xff, 0xff, 0, 1, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 1, 1, 0xff, 0, 0xff, 1, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0, 0, 0xff, 0xff, 1, 0, 1, 0xff, 1, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 1, 0, 1, 0xff, 1, 0xff }, { 0xff, 0xff, 1, 1, 0xff, 0, 0, 1, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 1, 1, 0, 0, 1, 0xff, 0xff, 1, 0xff, 1, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 
0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0, 0 }, { 0, 0xff, 0xff, 0, 1, 0, 0xff, 0, 1, 0, 0, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0, 0, 1, 1 }, { 0xff, 1, 0xff, 1, 0, 0xff, 0xff, 1, 0, 1, 1, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0, 1, 0, 0, 1, 1, 0 }, { 0, 0, 1, 1, 3, 0, 2, 1, 3, 2, 2, 3 }, { 1, 0xff, 1, 0xff, 0xff, 0xff, 0, 1, 1, 0, 0, 1 }, { 0xff, 0xff, 0, 0, 2, 0xff, 1, 0, 2, 1, 1, 2 }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0, 0 }, { 1, 0xff, 1, 0xff, 0, 1, 0xff, 0xff, 0, 1, 1, 0 }, { 0, 1, 1, 0, 0xff, 0xff, 0xff, 0xff, 0, 1, 1, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 0, 1 }, { 0xff, 1, 0xff, 1, 0xff, 1, 0, 0xff, 1, 0, 0, 1 }, { 0, 0, 0xff, 0xff, 1, 0, 2, 0xff, 1, 2, 2, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0 }, { 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 0xff, 1, 0, 0, 1 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 1, 1, 0xff, 1, 1, 0, 1, 0xff, 0, 0, 0xff }, { 2, 2, 1, 1, 0xff, 2, 0, 1, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 1, 0xff, 0xff, 1, 1, 0xff }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0 
}, { 1, 0xff, 0xff, 1, 1, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 1, 0xff, 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0 }, { 1, 1, 0, 0, 1, 0xff, 0xff, 0, 0xff, 1, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0, 0xff, 1, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 1, 0, 0, 1, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 1 }, { 0xff, 1, 1, 0xff, 0, 0, 1, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 1, 1, 0xff, 0xff, 1, 0, 1, 1, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 1, 0, 0, 1, 1, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 1, 0, 0, 1, 1, 2, 0, 0xff, 2, 2, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0 }, { 1, 0xff, 0xff, 1, 0, 1, 1, 1, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 1, 0xff, 0, 
0, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 2, 2, 0, 0, 1, 2, 0xff, 0, 1, 0xff, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 1, 0, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0, 1, 1, 0, 0xff, 0xff, 1, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 0xff, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }; const uint8_t num_vertex_lut2[NUM_CONFIGS] = { 0, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 
2, 1, 2, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 3, 2, 2, 3, 2, 1, 2, 1, 1, 1, 2, 1, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 3, 2, 2, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 1, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 2, 2, 4, 2, 3, 2, 2, 2, 2, 2, 3, 2, 2, 1, 1, 1, 1, 2, 3, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 2, 1, 2, 1, 2, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 3, 2, 1, 1, 2, 1, 1, 2, 1, 2, 1, 3, 1, 2, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 0 }; const unsigned NUM_AMBIGUOUS_CONFIGS = 36; const voxel_config_type config_2B_3B_lut[NUM_AMBIGUOUS_CONFIGS] = { 0xa0, 0x21, 0x42, 0x84, 0x05, 0x81, 0x48, 0x0a, 0x50, 0x12, 0x18, 0x24, // 2B 0xc1, 0xc2, 0x83, 0x45, 0x86, 0x49, 0x8a, 0x51, 0x92, 0x43, 0x54, 0x15, // 3B 0x16, 0x1c, 0x61, 0xa2, 0xa8, 0x29, 0x2a, 0x2c, 0x68, 0x34, 0x38, 0x94 }; const voxel_face_index_type config_2B_3B_ambiguous_face[NUM_AMBIGUOUS_CONFIGS] = { 5, 1, 2, 3, 0, 4, 3, 0, 5, 1, 4, 2, // 2B 4, 2, 4, 0, 3, 3, 0, 5, 1, 2, 5, 0, // 3B 1, 4, 1, 5, 5, 1, 0, 2, 3, 2, 4, 3 }; const voxel_face_index_type opposite_face_lut[VOXEL_NUM_FACES] = {5, 3, 4, 1, 2, 0}; const check_dir_type POS_X_DIR = 0; const check_dir_type NEG_X_DIR = 1; const check_dir_type POS_Y_DIR = 2; const check_dir_type NEG_Y_DIR = 3; const check_dir_type POS_Z_DIR = 4; const check_dir_type NEG_Z_DIR = 5; const check_dir_type face_to_check_dir_lut[VOXEL_NUM_FACES] = { NEG_Z_DIR, NEG_Y_DIR, POS_X_DIR, POS_Y_DIR, NEG_X_DIR, POS_Z_DIR }; const uint8_t LOCAL_EDGE_ENTRY = 0xff; const uint8_t edge_belonged_voxel_lut[VOXEL_NUM_EDGES] = { ( 0x00 | 0x40 | 0x20 ) | 10, // 0 ( 0x00 | 0x00 | 0x20 ) | 9, // 1 ( 0x00 | 0x00 | 0x20 ) | 10, // 2 ( 0x80 | 0x00 | 0x20 ) | 9, // 3 ( 0x80 | 0x40 | 0x00 ) | 6, // 4 ( 0x00 | 0x40 | 0x00 ) | 6, // 5 LOCAL_EDGE_ENTRY, // 6 ( 0x80 | 0x00 | 0x00 ) | 6, // 7 ( 0x00 | 0x40 | 0x00 ) | 10, // 8 LOCAL_EDGE_ENTRY, // 9 LOCAL_EDGE_ENTRY, // 10 ( 0x80 | 0x00 | 
0x00 ) | 9, // 11 }; // Same edge shared by four voxels. Default in CCW order when looking align the positive // direction of the axis. voxel_edge_index_type circular_edge_lut[3][4] = { {6, 7, 4, 5}, {9, 1, 3, 11}, {10, 8, 0, 2} }; const uint8_t VOXEL_NUM_LOCAL_EDGES = 3; voxel_edge_index_type voxel_local_edges[VOXEL_NUM_LOCAL_EDGES] = {6, 9, 10}; // LUT on device memory texture<iso_vertex_m_type, hipTextureType1D, hipReadModeElementType> config_edge_lut1_tex; texture<iso_vertex_m_type, hipTextureType1D, hipReadModeElementType> config_edge_lut2_tex; texture<uint8_t, hipTextureType1D, hipReadModeElementType> num_vertex_lut1_tex; texture<uint8_t, hipTextureType1D, hipReadModeElementType> num_vertex_lut2_tex; texture<voxel_config_type, hipTextureType1D, hipReadModeElementType> config_2B_3B_lut_tex; texture<voxel_face_index_type, hipTextureType1D, hipReadModeElementType> config_2B_3B_ambiguous_face_tex; texture<voxel_face_index_type, hipTextureType1D, hipReadModeElementType> opposite_face_lut_tex; texture<check_dir_type, hipTextureType1D, hipReadModeElementType> face_to_check_dir_lut_tex; texture<uint8_t, hipTextureType1D, hipReadModeElementType> edge_belonged_voxel_lut_tex; texture<voxel_edge_index_type, hipTextureType1D, hipReadModeElementType> circular_edge_lut_tex; texture<voxel_edge_index_type, hipTextureType1D, hipReadModeElementType> voxel_local_edges_tex; // A singleton class to hold all the device pointers needed by static LUTs. Saves trouble // for maintaining these pointers on the client side. 
// Lazily-constructed singleton owning the device pointers behind every static LUT
// texture. Clients call setup_device_luts()/cleanup_device_luts() without having to
// thread eleven raw pointers through their own code.
class LutPtrsCollection {
private:
    static std::unique_ptr<LutPtrsCollection> m_instance;
public:
    // Returns the process-wide instance, creating it on first use.
    // NOTE(review): not thread-safe — assumes setup happens from a single host thread.
    static LutPtrsCollection* instance() {
        if (!m_instance) {
            m_instance = std::unique_ptr<LutPtrsCollection>(new LutPtrsCollection);
        }
        return m_instance.get();
    }
    // Raw device allocations; owned here, freed by cleanup_device_luts().
    iso_vertex_m_type* d_config_edge_lut1;
    iso_vertex_m_type* d_config_edge_lut2;
    uint8_t* d_num_vertex_lut1;
    uint8_t* d_num_vertex_lut2;
    voxel_config_type* d_config_2B_3B_lut;
    voxel_face_index_type* d_config_2B_3B_ambiguous_face;
    voxel_face_index_type* d_opposite_face_lut;
    check_dir_type* d_face_to_check_dir_lut;
    uint8_t* d_edge_belonged_voxel_lut;
    voxel_edge_index_type* d_circular_edge_lut;
    voxel_edge_index_type* d_voxel_local_edges;
};

std::unique_ptr<LutPtrsCollection> LutPtrsCollection::m_instance = nullptr;

// Convenience overload: allocates/uploads all LUTs, stashing the device pointers in
// the singleton so the caller does not have to track them.
void setup_device_luts() {
    LutPtrsCollection* luts = LutPtrsCollection::instance();
    setup_device_luts(&(luts->d_config_edge_lut1), &(luts->d_config_edge_lut2),
                      &(luts->d_num_vertex_lut1), &(luts->d_num_vertex_lut2),
                      &(luts->d_config_2B_3B_lut), &(luts->d_config_2B_3B_ambiguous_face),
                      &(luts->d_opposite_face_lut), &(luts->d_face_to_check_dir_lut),
                      &(luts->d_edge_belonged_voxel_lut), &(luts->d_circular_edge_lut),
                      &(luts->d_voxel_local_edges));
}

// Allocates device memory for each host LUT, copies the table over, and binds the
// corresponding texture reference. Every out-parameter receives an owning device
// pointer that must later be released via cleanup_device_luts().
// All textures are bound with an 8-bit unsigned channel descriptor — presumably every
// LUT element type is 1 byte wide; verify if any of the typedefs ever changes.
void setup_device_luts(iso_vertex_m_type** d_config_edge_lut1,
                       iso_vertex_m_type** d_config_edge_lut2,
                       uint8_t** d_num_vertex_lut1,
                       uint8_t** d_num_vertex_lut2,
                       voxel_config_type** d_config_2B_3B_lut,
                       voxel_face_index_type** d_config_2B_3B_ambiguous_face,
                       voxel_face_index_type** d_opposite_face_lut,
                       check_dir_type** d_face_to_check_dir_lut,
                       uint8_t** d_edge_belonged_voxel_lut,
                       voxel_edge_index_type** d_circular_edge_lut,
                       voxel_edge_index_type** d_voxel_local_edges) {
    // Single shared descriptor: one unsigned 8-bit channel.
    const hipChannelFormatDesc channel_desc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
    // setup for d_config_edge_lut1 2D array (flattened NUM_CONFIGS x VOXEL_NUM_EDGES)
    checkCudaErrors(hipMalloc(d_config_edge_lut1, sizeof(voxel_config_type) * NUM_CONFIGS * VOXEL_NUM_EDGES));
    checkCudaErrors(hipMemcpy(*d_config_edge_lut1, (voxel_config_type*)(*config_edge_lut1),
                              sizeof(voxel_config_type) * NUM_CONFIGS * VOXEL_NUM_EDGES, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, config_edge_lut1_tex, *d_config_edge_lut1, channel_desc));
    // setup for d_config_edge_lut2 2D array
    checkCudaErrors(hipMalloc(d_config_edge_lut2, sizeof(voxel_config_type) * NUM_CONFIGS * VOXEL_NUM_EDGES));
    checkCudaErrors(hipMemcpy(*d_config_edge_lut2, (voxel_config_type*)(*config_edge_lut2),
                              sizeof(voxel_config_type) * NUM_CONFIGS * VOXEL_NUM_EDGES, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, config_edge_lut2_tex, *d_config_edge_lut2, channel_desc));
    // setup for d_num_vertex_lut1
    checkCudaErrors(hipMalloc(d_num_vertex_lut1, sizeof(uint8_t) * NUM_CONFIGS));
    checkCudaErrors(hipMemcpy(*d_num_vertex_lut1, num_vertex_lut1,
                              sizeof(uint8_t) * NUM_CONFIGS, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, num_vertex_lut1_tex, *d_num_vertex_lut1, channel_desc));
    // setup for d_num_vertex_lut2
    checkCudaErrors(hipMalloc(d_num_vertex_lut2, sizeof(uint8_t) * NUM_CONFIGS));
    checkCudaErrors(hipMemcpy(*d_num_vertex_lut2, num_vertex_lut2,
                              sizeof(uint8_t) * NUM_CONFIGS, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, num_vertex_lut2_tex, *d_num_vertex_lut2, channel_desc));
    // setup for d_config_2B_3B_lut
    checkCudaErrors(hipMalloc(d_config_2B_3B_lut, sizeof(voxel_config_type) * NUM_AMBIGUOUS_CONFIGS));
    checkCudaErrors(hipMemcpy(*d_config_2B_3B_lut, config_2B_3B_lut,
                              sizeof(voxel_config_type) * NUM_AMBIGUOUS_CONFIGS, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, config_2B_3B_lut_tex, *d_config_2B_3B_lut, channel_desc));
    // setup for d_config_2B_3B_ambiguous_face
    checkCudaErrors(hipMalloc(d_config_2B_3B_ambiguous_face, sizeof(voxel_face_index_type) * NUM_AMBIGUOUS_CONFIGS));
    checkCudaErrors(hipMemcpy(*d_config_2B_3B_ambiguous_face, config_2B_3B_ambiguous_face,
                              sizeof(voxel_face_index_type) * NUM_AMBIGUOUS_CONFIGS, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, config_2B_3B_ambiguous_face_tex, *d_config_2B_3B_ambiguous_face, channel_desc));
    // setup for d_opposite_face_lut
    checkCudaErrors(hipMalloc(d_opposite_face_lut, sizeof(voxel_face_index_type) * VOXEL_NUM_FACES));
    checkCudaErrors(hipMemcpy(*d_opposite_face_lut, opposite_face_lut,
                              sizeof(voxel_face_index_type) * VOXEL_NUM_FACES, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, opposite_face_lut_tex, *d_opposite_face_lut, channel_desc));
    // setup for d_face_to_check_dir_lut
    checkCudaErrors(hipMalloc(d_face_to_check_dir_lut, sizeof(check_dir_type) * VOXEL_NUM_FACES));
    checkCudaErrors(hipMemcpy(*d_face_to_check_dir_lut, face_to_check_dir_lut,
                              sizeof(check_dir_type) * VOXEL_NUM_FACES, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, face_to_check_dir_lut_tex, *d_face_to_check_dir_lut, channel_desc));
    // setup for d_edge_belonged_voxel_lut
    checkCudaErrors(hipMalloc(d_edge_belonged_voxel_lut, sizeof(uint8_t) * VOXEL_NUM_EDGES));
    checkCudaErrors(hipMemcpy(*d_edge_belonged_voxel_lut, edge_belonged_voxel_lut,
                              sizeof(uint8_t) * VOXEL_NUM_EDGES, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, edge_belonged_voxel_lut_tex, *d_edge_belonged_voxel_lut, channel_desc));
    // setup for d_circular_edge_lut (3 axes x 4 surrounding voxels = 12 entries)
    checkCudaErrors(hipMalloc(d_circular_edge_lut, sizeof(voxel_edge_index_type) * 12));
    checkCudaErrors(hipMemcpy(*d_circular_edge_lut, (voxel_edge_index_type*)(*circular_edge_lut),
                              sizeof(voxel_edge_index_type) * 12, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, circular_edge_lut_tex, *d_circular_edge_lut, channel_desc));
    // setup for d_voxel_local_edges
    checkCudaErrors(hipMalloc(d_voxel_local_edges, sizeof(voxel_edge_index_type) * VOXEL_NUM_LOCAL_EDGES));
    checkCudaErrors(hipMemcpy(*d_voxel_local_edges, voxel_local_edges,
                              sizeof(voxel_edge_index_type) * VOXEL_NUM_LOCAL_EDGES, hipMemcpyHostToDevice));
    checkCudaErrors(hipBindTexture(0, voxel_local_edges_tex, *d_voxel_local_edges, channel_desc));
}

// Convenience overload: releases every LUT device allocation held by the singleton.
void cleanup_device_luts() {
    LutPtrsCollection* luts = LutPtrsCollection::instance();
    cleanup_device_luts(luts->d_config_edge_lut1, luts->d_config_edge_lut2,
                        luts->d_num_vertex_lut1, luts->d_num_vertex_lut2,
                        luts->d_config_2B_3B_lut, luts->d_config_2B_3B_ambiguous_face,
                        luts->d_opposite_face_lut, luts->d_face_to_check_dir_lut,
                        luts->d_edge_belonged_voxel_lut, luts->d_circular_edge_lut,
                        luts->d_voxel_local_edges);
}

// Frees the device-side LUT allocations created by setup_device_luts().
// NOTE(review): the texture references are not unbound here — confirm that no kernel
// runs between cleanup and process exit.
void cleanup_device_luts(iso_vertex_m_type* d_config_edge_lut1,
                         iso_vertex_m_type* d_config_edge_lut2,
                         uint8_t* d_num_vertex_lut1,
                         uint8_t* d_num_vertex_lut2,
                         voxel_config_type* d_config_2B_3B_lut,
                         voxel_face_index_type* d_config_2B_3B_ambiguous_face,
                         voxel_face_index_type* d_opposite_face_lut,
                         check_dir_type* d_face_to_check_dir_lut,
                         uint8_t* d_edge_belonged_voxel_lut,
                         voxel_edge_index_type* d_circular_edge_lut,
                         voxel_edge_index_type* d_voxel_local_edges) {
    checkCudaErrors(hipFree(d_config_edge_lut1));
    checkCudaErrors(hipFree(d_config_edge_lut2));
    checkCudaErrors(hipFree(d_num_vertex_lut1));
    checkCudaErrors(hipFree(d_num_vertex_lut2));
    checkCudaErrors(hipFree(d_config_2B_3B_lut));
    checkCudaErrors(hipFree(d_config_2B_3B_ambiguous_face));
    checkCudaErrors(hipFree(d_opposite_face_lut));
    checkCudaErrors(hipFree(d_face_to_check_dir_lut));
    checkCudaErrors(hipFree(d_edge_belonged_voxel_lut));
    checkCudaErrors(hipFree(d_circular_edge_lut));
    checkCudaErrors(hipFree(d_voxel_local_edges));
}

// Stores the minimum required information for each voxel
class _VoxelInfo {
    typedef uint8_t info_type;
    // Bit positions within m_info for the three edges this voxel "owns" (6, 9, 10)
    // and for the LUT-selection flag; ccw flags live at (edge shift + 3).
    static const uint8_t EDGE_6_SHIFT = 0;
    static const uint8_t EDGE_9_SHIFT = 1;
    static const uint8_t EDGE_10_SHIFT = 2;
    static const uint8_t USE_LUT2_SHIFT = 7;
public:
    __host__ __device__ _VoxelInfo() = default;
    __host__ __device__ _VoxelInfo(voxel_index1D_type index) : m_index1D(index), m_info(0) { }
    // Records only the bipolar flag for 'edge' (one of 6/9/10).
    __host__ __device__ void encode_edge_is_bipolar(voxel_edge_index_type edge, bool is_bipolar) {
        uint8_t shift = get_edge_shift(edge);
        info_write_bit(shift, is_bipolar);
    }
    // Records the bipolar flag and, when bipolar, the polarization direction flag
    // (stored 3 bits above the bipolar bit).
    __host__ __device__ void encode_edge_bipolar_info(voxel_edge_index_type edge, bool is_bipolar, bool use_ccw) {
        uint8_t shift = get_edge_shift(edge);
        info_write_bit(shift, is_bipolar);
        if (is_bipolar) {
            shift += 3;
            info_write_bit(shift, use_ccw);
        }
    }
    __host__ __device__ bool is_edge_bipolar(voxel_edge_index_type edge) const {
        //if (edge != 6 && edge != 9 && edge != 10) {
        //    printf("is_edge_bipolar: edge: %d\n", edge);
        //}
        uint8_t shift = get_edge_shift(edge);
        return (bool)info_read_bit(shift);
    }
    // An edge that is 'CCW' means the polarization direction of the edge aligns with the positive axis.
    // [precondition] 'edge' must be bipolar
    __host__ __device__ bool is_edge_ccw(voxel_edge_index_type edge) const {
        assert(is_edge_bipolar(edge));
        uint8_t shift = get_edge_shift(edge) + 3;
        return (bool)info_read_bit(shift);
        // return true;
    }
    __host__ __device__ inline void encode_use_lut2(bool use_lut2) { info_write_bit(USE_LUT2_SHIFT, use_lut2); }
    __host__ __device__ inline bool use_lut2() const { return (bool)info_read_bit(USE_LUT2_SHIFT); }
    __host__ __device__ voxel_index1D_type index1D() const { return m_index1D; }
    __host__ __device__ voxel_config_type config() const { return m_config; }
    __host__ __device__ void set_config(voxel_config_type c) { m_config = c; }
    __host__ __device__ inline vertex_index_type vertex_begin() const { return m_vertex_begin; }
    __host__ __device__ void set_vertex_begin(vertex_index_type begin) { m_vertex_begin = begin; }
    __host__ __device__ uint8_t num_vertices() const { return m_num_vertices; }
    __host__ __device__ void set_num_vertices(uint8_t num) { m_num_vertices = num; }
    __host__ __device__ uint8_t info() const { return m_info; }
    // Number of bipolar-edge intersection vertices (0..3): one per set edge bit.
    __host__ __device__ uint8_t num_edge_vertices() const {
        uint8_t num = 0;
        num += info_read_bit(EDGE_6_SHIFT);
        num += info_read_bit(EDGE_9_SHIFT);
        num += info_read_bit(EDGE_10_SHIFT);
        return num;
    }
    // DMC iso-vertices = total vertices minus edge-intersection vertices.
    __host__ __device__ inline uint8_t num_iso_vertices() const {
        // return use_lut2() ? num_vertex_lut2[config] : num_vertex_lut1[config];
        return m_num_vertices - num_edge_vertices();
    }
    // Iso-vertices are laid out first in the vertex array, then edge vertices.
    __host__ __device__ inline vertex_index_type iso_vertex_begin() const { return vertex_begin(); }
    __host__ __device__ inline vertex_index_type iso_vertex_index(iso_vertex_m_type iso_vertex_m) const {
        assert(iso_vertex_m < num_iso_vertices());
        return iso_vertex_begin() + iso_vertex_m;
    }
    __host__ __device__ inline vertex_index_type edge_vertex_begin() const {
        return m_vertex_begin + num_iso_vertices();
    }
    // Global vertex index of the intersection vertex on a bipolar owned edge; the
    // offset counts how many lower-shift owned edges are also bipolar.
    __host__ __device__ vertex_index_type edge_vertex_index(voxel_edge_index_type edge) const {
        assert(is_edge_bipolar(edge));
        uint8_t offset = 0;
        for (uint8_t i = 0; i < get_edge_shift(edge); ++i) {
            offset += info_read_bit(i);
        }
        return edge_vertex_begin() + offset;
    }
    // Looks up which DMC iso-vertex of this voxel a given edge maps to, using the
    // LUT selected by use_lut2().
    __device__ iso_vertex_m_type iso_vertex_m_by_edge(voxel_edge_index_type edge) const {
        if (use_lut2()) {
            // return config_edge_lut2[m_config][edge];
            return tex1Dfetch(config_edge_lut2_tex, m_config * VOXEL_NUM_EDGES + edge);
        } else {
            // return config_edge_lut1[m_config][edge];
            return tex1Dfetch(config_edge_lut1_tex, m_config * VOXEL_NUM_EDGES + edge);
        }
    }
private:
    // Maps an owned edge index (6/9/10) to its bit position; asserts otherwise.
    __host__ __device__ uint8_t get_edge_shift(voxel_edge_index_type edge) const {
        uint8_t shift;
        switch (edge) {
        case 6: shift = EDGE_6_SHIFT; break;
        case 9: shift = EDGE_9_SHIFT; break;
        case 10: shift = EDGE_10_SHIFT; break;
        default: assert(false); break;
        }
        return shift;
    }
    __host__ __device__ void info_write_bit(uint8_t shift, bool flag) {
        info_type mask;
        if (flag) {
            mask = 0x01 << shift;
            m_info |= mask;
        } else {
            mask = ~(0x01 << shift);
            m_info &= mask;
        }
    }
    __host__ __device__ inline info_type info_read_bit(uint8_t shift) const {
        info_type shifted_info = m_info >> shift;
        return shifted_info & 0x01;
    }
    friend std::ostream& operator<<(std::ostream& os, const _VoxelInfo vx_info);
    // Its index_1D
    voxel_index1D_type m_index1D = INVALID_INDEX_1D;
    // The beginning index of the vertices (both DMC iso_vertex and iso-surface edge
    // intersection point).
    vertex_index_type m_vertex_begin = INVALID_UINT32;
    // The voxel config mask, each bit corresponds to one unique vertex corner point.
    // LSB (bit 0) represents corner pt 0, MSB (bit 7) represents corner pt 7
    voxel_config_type m_config = 0x00;
    // Compact bit vector:
    // bit 7: should use LUT2?
    // bit 5: is edge 10 using ccw?
    // bit 4: is edge 9 using ccw?
    // bit 3: is edge 6 using ccw?
    // bit 2: is edge 10 bipolar?
    // bit 1: is edge 9 bipolar?
    // bit 0: is edge 6 bipolar?
    // other bits: not used
    info_type m_info = 0x00;
    // Since this class will be enforeced aligned, we can use another 8 bit to store the
    // number of vertices, although we can fully retrieve this information under the help
    // of both 'info' and other 'LUT's. 8_bit is quite enough because each voxel will have
    // a maximum of 4 + 3 = 7 vertices. (4 for DMC iso-vertices, 3 for bipolar edge pts)
    uint8_t m_num_vertices = 0;
    uint8_t m_pad = 0xff;
};

// Debug/pretty printer for _VoxelInfo (host only).
std::ostream& operator<<(std::ostream& os, const _VoxelInfo vx_info) {
    os << "index1D: " << vx_info.index1D()
       << " config: " << std::hex <<(unsigned) vx_info.config() << std::dec
       << " num_vertices: " << (unsigned)vx_info.num_vertices()
       << " vertex_begin: " << vx_info.vertex_begin()
       << " info: " << std::hex << (unsigned)vx_info.m_info << std::dec;
    return os;
}

// Calculate a voxel's config mask: bit i is set iff corner i is below the iso-value.
__device__ voxel_config_type voxel_config_mask(const float* d_voxel_vals, float iso_value) {
    voxel_config_type mask = 0;
    for (uint8_t i = 0; i < 8; ++i) {
        mask |= (d_voxel_vals[i] < iso_value) << i;
    }
    return mask;
}

// True if 'index3D' lies outside the [0, grid_size) box on any axis.
__device__ bool is_out_of_grid_bound(const uint3& index3D, const uint3& grid_size) {
    return (index3D.x >= grid_size.x) || (index3D.y >= grid_size.y) || (index3D.z >= grid_size.z);
}

// Scan through and flag out the active voxels according to its voxel config.
// Flags each voxel as active (non-trivial config) or inactive.
// Launch layout: 3D grid covering num_voxels_dim; out-of-range threads exit early.
// The scalar grid has one sample per voxel corner, i.e. (num_voxels_dim + 1) per axis.
__global__ void flag_active_voxels_kern(flag_type* d_voxel_flags, const float* d_scalar_grid,
                                        const uint3 num_voxels_dim, const float iso_value) {
    uint3 index3D;
    index3D.x = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
    index3D.y = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
    index3D.z = __mul24(blockDim.z, blockIdx.z) + threadIdx.z;
    if (is_out_of_grid_bound(index3D, num_voxels_dim)) return;
    // Gather the eight corner samples in canonical corner order 0..7.
    float voxel_vals[8] = {
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y, index3D.z, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y, index3D.z + 1, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z + 1, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z + 1, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z + 1, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
    };
    voxel_config_type voxel_config = voxel_config_mask(voxel_vals, iso_value);
    unsigned index1D = index3D_to_1D(index3D, num_voxels_dim);
    // Active iff not all corners are on one side: config != 0 and config < MAX mask.
    d_voxel_flags[index1D] = (voxel_config && voxel_config < MAX_VOXEL_CONFIG_MASK);
}

// Host wrapper: launches flag_active_voxels_kern and synchronizes so launch and
// execution errors surface immediately.
void launch_flag_active_voxels(flag_type* d_voxel_flags, const float* d_scalar_grid,
                               const uint3 num_voxels_dim, const float iso_value,
                               const dim3 blocks_dim3, const dim3 threads_dim3) {
    hipLaunchKernelGGL(( flag_active_voxels_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0, d_voxel_flags,
                       d_scalar_grid, num_voxels_dim, iso_value);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
}

// Sum of a device array (used to count set flags).
size_t launch_thrust_count(const unsigned* d_arr, size_t size) {
    return thrust::reduce(thrust::device, d_arr, d_arr + size);
}

// Exclusive prefix sum of a device array into d_scan.
void launch_thrust_scan(unsigned* d_scan, const unsigned* d_data, size_t size) {
    thrust::exclusive_scan(thrust::device, d_data, d_data + size, d_scan);
}

// Exclusive prefix sum over op(d_data[i]) without materializing the transformed array.
template <typename OutT, typename InT, typename UnaryOp>
void launch_thrust_transform_scan(OutT* d_scan, const InT* d_data, size_t size, const UnaryOp& op) {
    auto begin = thrust::make_transform_iterator(d_data, op);
    auto end = thrust::make_transform_iterator(d_data + size, op);
    thrust::exclusive_scan(thrust::device, begin, end, d_scan);
}

// Linear interpolation of the iso-surface crossing between p0 (value v0) and p1 (value v1).
// [precondition] v0 != v1 (guaranteed when the edge is bipolar).
__device__ float3 lerp_float3(const float3& p0, const float3& p1,
                              const float v0, const float v1, const float iso_value) {
    float interp = (iso_value - v0) / (v1 - v0);
    float one_minus_interp = 1.0f - interp;
    float3 iso_vertex = p0 * one_minus_interp + p1 * interp;
    return iso_vertex;
}

// Check if an edge is bipolar given its two endpoints' value. An edge whose endpoint
// equals the iso-value exactly counts as bipolar here.
__device__ bool is_edge_bipolar(float val0, float val1, float iso_value) {
    if (val0 == val1) return false;
    else if (val0 > val1) return is_edge_bipolar(val1, val0, iso_value);
    return !((val0 < iso_value && val1 < iso_value) || (val0 > iso_value && val1 > iso_value));
}

// Return a voxel edge index (0-11) given its two corner point indices.
// [precondition] (p0, p1) must be one of the twelve voxel edges.
__device__ voxel_edge_index_type pt_pair_edge_lut(voxel_pt_index_type p0, voxel_pt_index_type p1) {
    if (p0 > p1) return pt_pair_edge_lut(p1, p0);
    if (p0 == 0 && p1 == 1) return 0;
    else if (p0 == 1 && p1 == 2) return 1;
    else if (p0 == 2 && p1 == 3) return 2;
    else if (p0 == 0 && p1 == 3) return 3;
    else if (p0 == 0 && p1 == 4) return 4;
    else if (p0 == 1 && p1 == 5) return 5;
    else if (p0 == 2 && p1 == 6) return 6;
    else if (p0 == 3 && p1 == 7) return 7;
    else if (p0 == 4 && p1 == 5) return 8;
    else if (p0 == 5 && p1 == 6) return 9;
    else if (p0 == 6 && p1 == 7) return 10;
    else if (p0 == 4 && p1 == 7) return 11;
    // Fix: control previously fell off the end of this non-void function (undefined
    // behavior) for a non-edge corner pair; trap in debug builds and return a sentinel.
    assert(false);
    return INVALID_UINT8;
}

// Compact to get the active voxels, for each compacted voxel, store its index_1D.
// [invariant] for 0 <= i < d_compact_voxel_info.size(),
//             d_full_voxel_index_map[d_compact_voxel_info[i].index1D] == i
__global__ void compact_voxel_flags_kern(_VoxelInfo* d_compact_voxel_info,
                                         voxel_index1D_type* d_full_voxel_index_map,
                                         const uint3 num_voxels_dim, const flag_type* d_flags,
                                         const unsigned* d_flags_scan, const unsigned flags_size) {
    uint3 index3D;
    index3D.x = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
    index3D.y = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
    index3D.z = __mul24(blockDim.z, blockIdx.z) + threadIdx.z;
    if (is_out_of_grid_bound(index3D, num_voxels_dim)) return;
    unsigned index1D = index3D_to_1D(index3D, num_voxels_dim);
    // Exclusive scan of the flags gives each active voxel its slot in the compact array.
    unsigned compact_index = d_flags_scan[index1D];
    if (d_flags[index1D]) {
        d_full_voxel_index_map[index1D] = compact_index;
        d_compact_voxel_info[compact_index] = _VoxelInfo(index1D);
    }
}

// Host wrapper for compact_voxel_flags_kern; synchronizes and checks for errors.
void launch_compact_voxel_flags(_VoxelInfo* d_compact_voxel_info,
                                voxel_index1D_type* d_full_voxel_index_map,
                                const uint3 num_voxels_dim, const flag_type* d_flags,
                                const unsigned* d_flags_scan, const unsigned flags_size,
                                const dim3 blocks_dim3, const dim3 threads_dim3) {
    hipLaunchKernelGGL(( compact_voxel_flags_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0,
                       d_compact_voxel_info, d_full_voxel_index_map,
                       num_voxels_dim, d_flags, d_flags_scan, flags_size);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
}

// Initialize the voxel info. During this stage we only store the voxel config and
// whether the edges this voxel manages (edge 6, 9, 10) are bipolar. The possible
// situation where voxels with 2B config and 3B config are adjacent is not resolved
// at this stage (see correct_voxels_info_kern).
__global__ void init_voxels_info_kern(_VoxelInfo* d_compact_voxel_info, const unsigned compact_size,
                                      const float* d_scalar_grid, const uint3 num_voxels_dim,
                                      const float iso_value) {
    // 1D indexing over the compact voxel list (2D grid of 1D blocks).
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;
    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(vx_info.index1D(), num_voxels_dim);
    float voxel_vals[8] = {
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y, index3D.z, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y, index3D.z + 1, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z + 1, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z + 1, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z + 1, num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
    };
    vx_info.set_config(voxel_config_mask(voxel_vals, iso_value));
    // Encode bipolarity (and, when bipolar, polarization direction) for one owned edge.
    auto encode_voxel_edge_info = [=, &vx_info](voxel_pt_index_type p0, voxel_pt_index_type p1) {
        voxel_edge_index_type edge_index = pt_pair_edge_lut(p0, p1);
        bool is_bipolar = is_edge_bipolar(voxel_vals[p0], voxel_vals[p1], iso_value);
        if (is_bipolar) {
            bool use_ccw = voxel_vals[p0] <= iso_value;
            vx_info.encode_edge_bipolar_info(edge_index, is_bipolar, use_ccw);
        } else {
            vx_info.encode_edge_is_bipolar(edge_index, is_bipolar);
        }
    };
    encode_voxel_edge_info(2, 6); // edge 6
    encode_voxel_edge_info(5, 6); // edge 9
    encode_voxel_edge_info(7, 6); // edge 10
    d_compact_voxel_info[compact_index] = vx_info;
}

// Host wrapper for init_voxels_info_kern; synchronizes and checks for errors.
void launch_init_voxels_info(_VoxelInfo* d_compact_voxel_info, const unsigned compact_size,
                             const float* d_scalar_grid, const uint3 num_voxels_dim,
                             const float iso_value, const dim3 blocks_dim3, const dim3 threads_dim3) {
    hipLaunchKernelGGL(( init_voxels_info_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0,
                       d_compact_voxel_info, compact_size, d_scalar_grid,
                       num_voxels_dim, iso_value);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
}

// Check if the given voxel config belongs to the 2B or 3B ambiguous config category.
// On success, 'index' receives the position within the ambiguous-config LUT.
// Linear scan over NUM_AMBIGUOUS_CONFIGS texture entries.
__device__ bool is_ambiguous_config(voxel_config_type config, uint8_t& index) {
    for (unsigned i = 0; i < NUM_AMBIGUOUS_CONFIGS; ++i) {
        if (tex1Dfetch(config_2B_3B_lut_tex, i) == config) {
            index = i;
            return true;
        }
    }
    return false;
}

// Check if after we advance the 'index3D' according to 'dir', the new result will
// exceed the boundary or not. Have to use this function because we are using unsigned
// int instead of int.
// Returns true if moving 'index3D' one step along 'dir' would leave [0, dims).
// The negative-direction checks guard against unsigned wrap-around before the
// subtraction in get_index3D_by_dir is performed.
__device__ bool will_exceed_boundary(uint3 index3D, uint3 dims, const check_dir_type dir) {
    switch (dir) {
    case POS_X_DIR: // CHECK_DIR::PX:
        return index3D.x + 1 >= dims.x;
    case NEG_X_DIR: // CHECK_DIR::NX:
        return index3D.x == 0;
    case POS_Y_DIR: // CHECK_DIR::PY:
        return index3D.y + 1 >= dims.y;
    case NEG_Y_DIR: // CHECK_DIR::NY:
        return index3D.y == 0;
    case POS_Z_DIR: // CHECK_DIR::PZ:
        return index3D.z + 1 >= dims.z;
    case NEG_Z_DIR: // CHECK_DIR::NZ:
        return index3D.z == 0;
    default:
        return false;
    }
}

// Execute the 'dir' on 'index3D' to get the new result. It is the caller's
// responsibility to make sure that the result won't exceed the boundary
// (see will_exceed_boundary).
__device__ uint3 get_index3D_by_dir(uint3 index3D, const check_dir_type dir) {
    switch (dir) {
    case POS_X_DIR: // CHECK_DIR::PX:
        return make_uint3(index3D.x + 1, index3D.y, index3D.z);
    case NEG_X_DIR: // CHECK_DIR::NX:
        return make_uint3(index3D.x - 1, index3D.y, index3D.z);
    case POS_Y_DIR: // CHECK_DIR::PY:
        return make_uint3(index3D.x, index3D.y + 1, index3D.z);
    case NEG_Y_DIR: // CHECK_DIR::NY:
        return make_uint3(index3D.x, index3D.y - 1, index3D.z);
    case POS_Z_DIR: // CHECK_DIR::PZ:
        return make_uint3(index3D.x, index3D.y, index3D.z + 1);
    case NEG_Z_DIR: // CHECK_DIR::NZ:
        return make_uint3(index3D.x, index3D.y, index3D.z - 1);
    }
    // Fix: the original fell off the end of this non-void function (undefined
    // behavior) for an unrecognized direction; return the input unchanged instead.
    return index3D;
}

// Check if the active voxel indicated by 'cur_compact_index' has an adjacent voxel which has
// an ambiguous config that will result in non-manifold situation.
// [precondition] d_compact_voxel_info[cur_compact_index].config == config_2B_3B_lut[cur_config_index]
//
// Looks up the neighbor that shares the ambiguous face of the current voxel's
// 2B/3B config. If that neighbor also has an ambiguous config,
// 'adjacent_compact_index' receives its index in the compact array and true is
// returned; otherwise the out-parameter is untouched and false is returned.
// All LUTs are read through texture references.
__device__ bool is_adjacent_ambiguous_config(voxel_index1D_type& adjacent_compact_index,
                                             voxel_index1D_type cur_index1D, uint8_t cur_config_index,
                                             const _VoxelInfo* d_compact_voxel_info,
                                             const voxel_index1D_type* d_full_voxel_index_map,
                                             const uint3& num_voxels_dim)
{
    // assert(compact_voxel_info[cur_compact_index].config() == config_2B_3B_lut[cur_config_index]);
    // Get the 3D coordinate of the current active voxel
    uint3 cur_index3D = index1D_to_3D(cur_index1D, num_voxels_dim);
    // uint3 cur_index3D = index1D_to_3D(compact_voxel_info[cur_compact_index].index1D(), num_voxels_dim);
    // Get the checking direction, or offset, according to 'cur_ambiguous_face'
    // voxel_face_index_type cur_ambiguous_face = config_2B_3B_ambiguous_face[cur_config_index];
    voxel_face_index_type cur_ambiguous_face = tex1Dfetch(config_2B_3B_ambiguous_face_tex, cur_config_index);
    // CHECK_DIR dir = face_to_check_dir_lut[cur_ambiguous_face];
    check_dir_type dir = tex1Dfetch(face_to_check_dir_lut_tex, cur_ambiguous_face);
    // A face on the grid boundary has no neighbor — nothing to check.
    if (will_exceed_boundary(cur_index3D, num_voxels_dim, dir))
    {
        return false;
    }
    // Compute the index of the voxel to be checked in 'd_compact_voxel_info'
    uint3 index3D_to_check = get_index3D_by_dir(cur_index3D, dir);
    voxel_index1D_type index1D_to_check;
    index3D_to_1D(index3D_to_check, num_voxels_dim, index1D_to_check);
    // Map from full-grid index to compacted-array index.
    voxel_index1D_type adjc_compact_index_to_check = d_full_voxel_index_map[index1D_to_check];
    // assert(adjc_compact_index_to_check != INVALID_INDEX_1D);
    uint8_t adj_config_index;
    if (is_ambiguous_config(d_compact_voxel_info[adjc_compact_index_to_check].config(), adj_config_index))
    {
        // voxel_face_index_type adj_ambiguous_face = config_2B_3B_ambiguous_face[adj_config_index];
        // assert(opposite_face_lut[cur_ambiguous_face] == adj_ambiguous_face);
        adjacent_compact_index = adjc_compact_index_to_check;
        return true;
    }
    return false;
}

// Correct some of the voxels when it and
// its adjacent voxel are having ambiguous configs that will
// result in non-manifold. Returns the actual number of vertices, including both iso-vertex and
// intersection vertex between voxel bipolar edge and iso-surface.
__global__ void correct_voxels_info_kern(_VoxelInfo* d_compact_voxel_info, unsigned compact_size,
                                         const voxel_index1D_type* d_full_voxel_index_map,
                                         const uint3 num_voxels_dim)
{
    // Flatten the (possibly 2D) grid of 1D blocks into a linear thread index.
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint8_t ambiguous_config_index = INVALID_UINT8;
    // if ((vx_info.use_lut2()) || (!is_ambiguous_config(vx_info.config(), ambiguous_config_index)))
    if (!is_ambiguous_config(vx_info.config(), ambiguous_config_index))
    {
        return;
    }
    voxel_index1D_type adjacent_compact_index;
    if (is_adjacent_ambiguous_config(adjacent_compact_index, vx_info.index1D(), ambiguous_config_index,
                                     d_compact_voxel_info, d_full_voxel_index_map, num_voxels_dim))
    {
        // NOTE(review): debug printf left in — consider removing for release builds.
        printf("compact_index %d uses lut2!\n", compact_index);
        // Switch this voxel to the alternative triangulation LUT to stay manifold.
        d_compact_voxel_info[compact_index].encode_use_lut2(true);
        // d_compact_voxel_info[adjacent_compact_index].encode_use_lut2(true);
    }
}

// Host wrapper for correct_voxels_info_kern: launch + sync + error check.
void launch_correct_voxels_info(_VoxelInfo* d_compact_voxel_info, unsigned num_compact_voxels,
                                const voxel_index1D_type* d_full_voxel_index_map, const uint3 num_voxels_dim,
                                const dim3 blocks_dim3, const dim3 threads_dim3)
{
    hipLaunchKernelGGL(( correct_voxels_info_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0, d_compact_voxel_info, num_compact_voxels,
                                                             d_full_voxel_index_map, num_voxels_dim);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
}

// Per-voxel vertex count = iso-vertices (from LUT1 or LUT2, depending on the
// manifold correction above) + edge/iso-surface intersection vertices.
__global__ void calc_num_vertices_per_voxel_kern(_VoxelInfo* d_compact_voxel_info, unsigned compact_size)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size)
        return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint8_t num_voxel_vertices = 0;
    // num iso-vertices
    if (vx_info.use_lut2())
    {
        // num_voxel_vertices += num_vertex_lut2[vx_info.config()];
        num_voxel_vertices += tex1Dfetch(num_vertex_lut2_tex, vx_info.config());
    }
    else
    {
        // num_voxel_vertices += num_vertex_lut1[vx_info.config()];
        num_voxel_vertices += tex1Dfetch(num_vertex_lut1_tex, vx_info.config());
    }
    // num edge iso-surface intersection vertices
    num_voxel_vertices += vx_info.num_edge_vertices();
    d_compact_voxel_info[compact_index].set_num_vertices(num_voxel_vertices);
}

// Host wrapper for calc_num_vertices_per_voxel_kern: launch + sync + error check.
void launch_calc_num_vertices_per_voxel(_VoxelInfo* d_compact_voxel_info, unsigned num_compact_voxels,
                                        const dim3 blocks_dim3, const dim3 threads_dim3)
{
    hipLaunchKernelGGL(( calc_num_vertices_per_voxel_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0, d_compact_voxel_info, num_compact_voxels);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
}

// Stores each voxel's starting offset into the global vertex array, taken from
// the exclusive scan of per-voxel vertex counts.
__global__ void set_vertices_begin_kern(_VoxelInfo* d_compact_voxel_info,
                                        const vertex_index_type* d_vertices_begin_scan, unsigned compact_size)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size)
        return;
    d_compact_voxel_info[compact_index].set_vertex_begin(d_vertices_begin_scan[compact_index]);
}

// Host wrapper for set_vertices_begin_kern: launch + sync + error check.
void launch_set_vertices_begin(_VoxelInfo* d_compact_voxel_info, const vertex_index_type* d_vertices_begin_scan,
                               unsigned compact_size, dim3 blocks_dim3, dim3 threads_dim3)
{
    hipLaunchKernelGGL(( set_vertices_begin_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0, d_compact_voxel_info, d_vertices_begin_scan, compact_size);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
}

// Decodes an edge-ownership LUT entry: low nibble is the owning voxel's local
// edge index; the top three bits (consumed one at a time via shifts below)
// give the x/y/z offsets (0 or -1) from this voxel to the owning voxel.
// LOCAL_EDGE_ENTRY means the edge is owned by the current voxel; the
// out-parameters are left untouched in that case.
__device__ void decode_edge_belong_voxel_entry(uint8_t entry, int8_t& x_offset, int8_t& y_offset,
                                               int8_t& z_offset, uint8_t& belonged_edge_index)
{
    if (entry == LOCAL_EDGE_ENTRY) return;
    // extract the edge
    belonged_edge_index = 0x0f &
entry;
    // Map the examined top bit to a voxel offset: 0 -> 0, 1 -> -1.
    auto get_offset = [](uint8_t first_bit)
    {
        switch (first_bit)
        {
            case 0x00: return (int8_t)0;
            case 0x80: return (int8_t)-1;
            default: assert(false); return (int8_t)0xff;
        }
    };
    // Consume bits 7, 6, 5 of 'entry' as the x, y, z offsets respectively.
    uint8_t first_bit = entry & 0x80;
    x_offset = get_offset(first_bit);
    entry <<= 1;
    first_bit = entry & 0x80;
    y_offset = get_offset(first_bit);
    entry <<= 1;
    first_bit = entry & 0x80;
    z_offset = get_offset(first_bit);
}

// Sample the intersection vertices positions between voxel bipolar edges and iso-surface.
// Each voxel is only responsible for its local edges, namely 6, 9 and 10.
// All three local edges share corner point 6, whose scalar value ('val6') is
// loaded once and reused. Results go to d_vertices starting at the voxel's
// edge_vertex_begin() offset, in edge order 6, 9, 10 (bipolar edges only).
__global__ void sample_edge_intersection_vertices_kern(float3* d_vertices,
                                                       const _VoxelInfo* d_compact_voxel_info,
                                                       const unsigned compact_size,
                                                       const float* d_scalar_grid,
                                                       const uint3 num_voxels_dim,
                                                       const float3 xyz_min, const float3 xyz_range,
                                                       const float iso_value)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size)
        return;

    uint3 index3D = index1D_to_3D(d_compact_voxel_info[compact_index].index1D(), num_voxels_dim);
    vertex_index_type vx_edge_vertex_index = d_compact_voxel_info[compact_index].edge_vertex_begin();
    // World-space coordinates of the voxel's far corner (point 6).
    float x1 = ijk_to_xyz(index3D.x + 1, num_voxels_dim.x, xyz_range.x, xyz_min.x);
    float y1 = ijk_to_xyz(index3D.y + 1, num_voxels_dim.y, xyz_range.y, xyz_min.y);
    float z1 = ijk_to_xyz(index3D.z + 1, num_voxels_dim.z, xyz_range.z, xyz_min.z);
    // Scalar value at corner point 6; note the scalar grid has one more sample
    // than voxels along each axis (hence the "+ 1" dims).
    float val6 = d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z + 1,
                                             num_voxels_dim.x + 1, num_voxels_dim.y + 1)];
    float xyz_changed = ijk_to_xyz(index3D.z, num_voxels_dim.z, xyz_range.z, xyz_min.z);
    if (d_compact_voxel_info[compact_index].is_edge_bipolar(6))
    {
        // edge 6, pt 2 & 6
        d_vertices[vx_edge_vertex_index] =
            lerp_float3(make_float3(x1, y1, xyz_changed), make_float3(x1, y1, z1),
                        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z,
                                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
                        val6, iso_value);
        ++vx_edge_vertex_index;
    }
    xyz_changed = ijk_to_xyz(index3D.y, num_voxels_dim.y, xyz_range.y, xyz_min.y);
    if (d_compact_voxel_info[compact_index].is_edge_bipolar(9))
    {
        // edge 9, pt 5 & 6
        d_vertices[vx_edge_vertex_index] =
            lerp_float3(make_float3(x1, xyz_changed, z1), make_float3(x1, y1, z1),
                        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z + 1,
                                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
                        val6, iso_value);
        ++vx_edge_vertex_index;
    }
    xyz_changed = ijk_to_xyz(index3D.x, num_voxels_dim.x, xyz_range.x, xyz_min.x);
    if (d_compact_voxel_info[compact_index].is_edge_bipolar(10))
    {
        // edge 10, pt 6 & 7
        d_vertices[vx_edge_vertex_index] =
            lerp_float3(make_float3(x1, y1, z1), make_float3(xyz_changed, y1, z1), val6,
                        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z + 1,
                                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
                        iso_value);
        ++vx_edge_vertex_index;
    }
}

// Calculate the iso vertices positions in each voxel.
// Each iso-vertex position is the average of the intersection vertices on the
// edges associated with it. Uses dynamic shared memory: one _VoxelInfo slot
// per thread; each thread only touches its own slot, so no __syncthreads()
// is required.
__global__ void calc_iso_vertices_kern(float3* d_vertices, const _VoxelInfo* d_compact_voxel_info,
                                       const unsigned compact_size,
                                       const voxel_index1D_type* d_full_voxel_index_map,
                                       const uint3 num_voxels_dim)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size)
        return;

    extern __shared__ _VoxelInfo sh_vx_info[];
    sh_vx_info[threadIdx.x] = d_compact_voxel_info[compact_index];
    // _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);

    uint3 index3D = index1D_to_3D(sh_vx_info[threadIdx.x].index1D(), num_voxels_dim);
    // Running count of intersection vertices accumulated per iso-vertex
    // (at most 4 iso-vertices per voxel — TODO confirm).
    uint8_t iso_vertex_num_incident[4] = {0, 0, 0, 0};
    for (voxel_edge_index_type edge = 0; edge < VOXEL_NUM_EDGES; ++edge)
    {
        iso_vertex_m_type iso_vertex_m = sh_vx_info[threadIdx.x].iso_vertex_m_by_edge(edge);
        if (iso_vertex_m == NO_VERTEX)
        {
            continue;
        }
        // uint8_t entry = edge_belonged_voxel_lut[edge];
        uint8_t entry = tex1Dfetch(edge_belonged_voxel_lut_tex, edge);
        voxel_edge_index_type belonged_edge = 0xff;
        voxel_index1D_type
belonged_index1D = INVALID_INDEX_1D;
        if (entry == LOCAL_EDGE_ENTRY)
        {
            // edge belongs to current voxel
            belonged_index1D = sh_vx_info[threadIdx.x].index1D();
            belonged_edge = edge;
        }
        else
        {
            int8_t x_offset = 0xff, y_offset = 0xff, z_offset = 0xff;
            decode_edge_belong_voxel_entry(entry, x_offset, y_offset, z_offset, belonged_edge);
            // The owning voxel lies at a non-positive offset; skip edges whose
            // owner would fall outside the grid.
            bool exceed_boundary = (x_offset < 0 && index3D.x == 0) ||
                                   (y_offset < 0 && index3D.y == 0) ||
                                   (z_offset < 0 && index3D.z == 0);
            if (exceed_boundary)
            {
                continue;
            }
            belonged_index1D = index3D_to_1D(index3D.x + x_offset, index3D.y + y_offset,
                                             index3D.z + z_offset, num_voxels_dim.x, num_voxels_dim.y);
        }
        // Get the 'belonged_voxel' which manages 'belonged_edge'
        vertex_index_type edge_intersect_vertex_index =
            d_compact_voxel_info[d_full_voxel_index_map[belonged_index1D]].edge_vertex_index(belonged_edge);
        vertex_index_type iso_vertex_index = sh_vx_info[threadIdx.x].iso_vertex_index(iso_vertex_m);
        if (iso_vertex_num_incident[iso_vertex_m] == 0)
        {
            // If this is the first time we see 'iso_vertex_m', we just assign it
            d_vertices[iso_vertex_index] = d_vertices[edge_intersect_vertex_index];
        }
        else
        {
            // Otherwise we accumulate into it (averaged below).
            d_vertices[iso_vertex_index] += d_vertices[edge_intersect_vertex_index];
        }
        ++iso_vertex_num_incident[iso_vertex_m];
    }
    // For each iso-vertex managed by 'vx_info', calculate its new position by averaging its
    // associated edges intersection vertex positions.
    for (iso_vertex_m_type iso_vertex_m = 0;
         iso_vertex_m < sh_vx_info[threadIdx.x].num_iso_vertices(); ++iso_vertex_m)
    {
        vertex_index_type iso_vertex_index = sh_vx_info[threadIdx.x].iso_vertex_index(iso_vertex_m);
        if (iso_vertex_num_incident[iso_vertex_m])
        {
            d_vertices[iso_vertex_index] /= (float)(iso_vertex_num_incident[iso_vertex_m]);
        }
    }
}

// Range object enumerating, in circular (cw or ccw) order, the four voxels
// that share one of this voxel's local edges (6, 9 or 10), together with that
// edge's index inside each neighboring voxel. Intended for range-for use only.
class CircularEdgeRange
{
public:
    class CircularEdgeIterator
    {
    public:
        typedef CircularEdgeIterator iterator_type;

        // Begin iterator: starts at circular state 0.
        __device__ CircularEdgeIterator(voxel_edge_index_type edge, bool ccw)
            : m_lut_index(get_lut_index_by_edge(edge)), m_cur_state(0), m_ccw(ccw) { }

        // For end iterator
        __device__ CircularEdgeIterator(voxel_edge_index_type edge)
            : m_lut_index(get_lut_index_by_edge(edge)), m_cur_state(4), m_ccw(true) { }

        // We are using CircularEdgeIterator itself, it does not represent any data underlying it. However,
        // for range object to work in c++11, we have to define dereference opreator*(). Therefore we let
        // it to dereference to itself.
        __device__ const CircularEdgeIterator& operator*() const { return *this; }

        // We've been lazy here and only compares 'm_lut_index' and 'm_cur_state'.
        // It's not absolutely safe, but we don't expect the client should use this class at all!
__device__ bool operator==(const iterator_type& other) const
        {
            return (m_lut_index == other.m_lut_index) && (m_cur_state == other.m_cur_state);
        }

        __device__ bool operator!=(const iterator_type& other) const
        {
            return !(this->operator==(other));
        }

        // Advance to the next of the four circular states; saturates at 4 (end).
        __device__ iterator_type& operator++()
        {
            if (m_cur_state < 4) ++m_cur_state;
            return (*this);
        }

        // Retrieve the information of the adjacent voxel that shares the edge, along with
        // the edge index in that voxel, in circular order
        __device__ void retrieve(uint3& circular_index3D, voxel_edge_index_type& circular_edge,
                                 const uint3& src_index3D) const
        {
            // ccw order: 0, 1, 2, 3
            // cw order: 0, 3, 2, 1
            // cw[i] = (3 - ccw[i] + 1) % 4 = (4 - ccw[i]) % 4
            if (m_ccw)
            {
                // circular_edge = circular_edge_lut[m_lut_index][ccw_order[m_cur_state]];
                circular_edge = tex1Dfetch(circular_edge_lut_tex, m_lut_index + m_cur_state);
            }
            else
            {
                // circular_edge = circular_edge_lut[m_lut_index][cw_order[m_cur_state]];
                circular_edge = tex1Dfetch(circular_edge_lut_tex, m_lut_index + ((4 - m_cur_state) % 4));
            }
            // reverse calculate the adjacent voxel that shares the edge
            // uint8_t entry = edge_belonged_voxel_lut[circular_edge];
            uint8_t entry = tex1Dfetch(edge_belonged_voxel_lut_tex, circular_edge);
            if (entry == LOCAL_EDGE_ENTRY)
            {
                circular_index3D = src_index3D;
            }
            else
            {
                // The LUT stores the offset from neighbor to owner; negate it
                // to go from the source voxel to the neighbor.
                int8_t x_offset, y_offset, z_offset;
                voxel_edge_index_type src_edge;
                decode_edge_belong_voxel_entry(entry, x_offset, y_offset, z_offset, src_edge);
                assert(get_lut_index_by_edge(src_edge) == m_lut_index);
                x_offset = -x_offset;
                y_offset = -y_offset;
                z_offset = -z_offset;
                circular_index3D = src_index3D;
                circular_index3D.x += x_offset;
                circular_index3D.y += y_offset;
                circular_index3D.z += z_offset;
            }
        }

    private:
        // Maps a local edge (6/9/10) to its row offset in the circular-edge LUT
        // (4 entries per row).
        // NOTE(review): falls off the end after assert(false) for any other
        // edge — value-returning function with no return on that path (UB when
        // asserts are disabled); consider 'return 0;' as a fallback.
        __device__ uint8_t get_lut_index_by_edge(voxel_edge_index_type edge) const
        {
            if (edge == 6) return 0;
            else if (edge == 9) return 4; // 1;
            else if (edge == 10) return 8; // 2;
            assert(false);
        }

        uint8_t m_lut_index;  // row offset into the circular-edge LUT
        uint8_t m_cur_state;  // current circular position, 0..3; 4 == end
        bool m_ccw;           // traversal orientation
    };

    __device__
CircularEdgeRange(voxel_edge_index_type edge, bool ccw = true) : m_edge(edge), m_ccw(ccw) { }

    __device__ CircularEdgeIterator begin() const { return {m_edge, m_ccw}; }
    __device__ CircularEdgeIterator end() const { return {m_edge}; }

private:
    uint8_t m_edge;  // the shared local edge (6, 9 or 10)
    bool m_ccw;      // traversal orientation
};

// Check, when we want to retrieve all the four voxels sharing the same 'edge', if any of these voxels
// will actually exceed the boundary. Notice that all the circular edges are carefully designed so that
// the adjacent voxels will only increase their position along the positive axis direction.
// NOTE(review): the default branch has no return after assert(false) — UB when
// asserts are disabled; consider returning true as a conservative fallback.
__device__ bool circular_edge_exceed_boundary(voxel_edge_index_type edge, const uint3& index3D,
                                              const uint3& num_voxels_dim)
{
    switch (edge)
    {
        case 6:
            return (index3D.x + 1 >= num_voxels_dim.x) || (index3D.y + 1 >= num_voxels_dim.y);
        case 9:
            return (index3D.x + 1 >= num_voxels_dim.x) || (index3D.z + 1 >= num_voxels_dim.z);
        case 10:
            return (index3D.y + 1 >= num_voxels_dim.y) || (index3D.z + 1 >= num_voxels_dim.z);
        default:
            assert(false);
    }
}

// Projects the four iso-vertices surrounding 'edge' onto the 2D plane
// perpendicular to that edge: xy for edge 6, xz for edge 9, yz for edge 10.
__device__ void project_vertices_by_shared_edge(float2* projected_vertex_pos, voxel_edge_index_type edge,
                                                const vertex_index_type* iso_vertex_indices,
                                                const float3* compact_vertices)
{
    if (edge == 6)
    {
        for (uint8_t i = 0; i < 4; ++i)
        {
            projected_vertex_pos[i] = xy(compact_vertices[iso_vertex_indices[i]]);
        }
    }
    else if (edge == 9)
    {
        for (uint8_t i = 0; i < 4; ++i)
        {
            projected_vertex_pos[i] = xz(compact_vertices[iso_vertex_indices[i]]);
        }
    }
    else if (edge == 10)
    {
        for (uint8_t i = 0; i < 4; ++i)
        {
            projected_vertex_pos[i] = yz(compact_vertices[iso_vertex_indices[i]]);
        }
    }
    else
    {
        assert(false);
    }
}

// Sign (+1/-1) of the z component of the 2D cross product
// (p_right - p_mid) x (p_left - p_mid); +1 also covers the degenerate
// (collinear, cross_z == 0) case.
inline __device__ int8_t calc_cross_z_sign(const float2& p_left, const float2& p_mid, const float2& p_right)
{
    float dx1 = p_right.x - p_mid.x, dy1 = p_right.y - p_mid.y;
    float dx2 = p_left.x - p_mid.x, dy2 = p_left.y - p_mid.y;
    float cross_z = dx1 * dy2 - dx2 * dy1;
    return cross_z >= 0 ?
1 : -1;
}

// For each corner of the quadrilateral 'pts', computes the sign of the turn at
// that corner and packs the results into two bytes:
//   bit 3-0: count of corners with that sign,
//   bit 7-4: index of the last corner seen with that sign.
// 'pos_info' collects the +1 corners, 'neg_info' the -1 corners.
__device__ void calc_quadrilateral_signs(const float2* pts, uint8_t& pos_info, uint8_t& neg_info)
{
    pos_info = 0x00;
    neg_info = 0x00;
    auto encode_sign_info = [&](uint8_t& info, uint8_t index)
    {
        // info:
        // bit 3-0, count of pos/neg signs
        // bit 7-4, index
        info &= 0x0f;
        info += 1;
        index = (index & 0x0f) << 4;
        info |= index;
    };
    auto calc_sign = [&](uint8_t index)
    {
        // Turn sign at corner 'index' using its two circular neighbors.
        int8_t sign = calc_cross_z_sign(pts[(index + 4 - 1) % 4], pts[index], pts[(index + 1) % 4]);
        if (sign == 1)
        {
            encode_sign_info(pos_info, index);
        }
        else
        {
            encode_sign_info(neg_info, index);
        }
    };
    for (uint8_t i = 0; i < 4; ++i)
    {
        calc_sign(i);
    }
}

// A quadrilateral is complex (self-intersecting) iff the positive and negative
// turn counts are equal. Since the four counts always sum to 4, the only case
// for this is when (pos_info & 0x0f) == (neg_info & 0x0f) == 2.
__device__ bool is_quadrilateral_complex(uint8_t pos_info, uint8_t neg_info)
{
    return (pos_info & 0x0f) == (neg_info & 0x0f);
}

// is_quadrilateral_convex function acts a bit weird. It tests if the four points
// in 'pts' form a convex quadrilateral. If they does, then 'split_index' will not
// be changed. Otherwise if they form a concave quadrilateral, 'split_index' stores
// the index of the point (in range [0, 3]) that causes the concavity.
__device__ bool is_quadrilateral_convex(uint8_t pos_info, uint8_t neg_info, uint8_t& unique_index)
{
    // All turns share one sign -> convex; 'unique_index' untouched.
    if (((pos_info & 0x0f) == 0) || ((neg_info & 0x0f) == 0))
    {
        return true;
    }
    else if ((pos_info & 0x0f) < (neg_info & 0x0f))
    {
        // Exactly one positive turn (1 vs 3): that corner causes the concavity.
        unique_index = (pos_info & 0xf0) >> 4;
    }
    else if ((neg_info & 0x0f) < (pos_info & 0x0f))
    {
        unique_index = (neg_info & 0xf0) >> 4;
    }
    else
    {
        // 2 vs 2 is the complex case; callers must filter it out beforehand
        // with is_quadrilateral_complex().
        assert(false);
    }
    return false;
}

// Convenience overload: computes the sign info from 'pts' first.
__device__ bool is_quadrilateral_convex(const float2* pts, uint8_t& unique_index)
{
    uint8_t pos_info = 0x00, neg_info = 0x00;
    calc_quadrilateral_signs(pts, pos_info, neg_info);
    return is_quadrilateral_convex(pos_info, neg_info, unique_index);
}

// Angle (radians) at 'p_mid' formed by the directions toward 'p_left' and
// 'p_right'. Assumes normalize() modifies its argument in place — TODO confirm.
__device__ float calc_radian(const float2& p_left, const float2& p_mid, const float2& p_right)
{
    float2 v_ml = p_left - p_mid;
    normalize(v_ml);
    float2 v_mr = p_right - p_mid;
    normalize(v_mr);
    float theta = acosf(v_ml.x * v_mr.x + v_ml.y * v_mr.y);
    return theta;
}

// Chooses the diagonal (split0, split1) along which to split the quadrilateral
// into two triangles. Concave: split at the corner causing the concavity.
__device__ void find_quadrilateral_split(const float2* pts, uint8_t pos_info, uint8_t neg_info,
                                         uint8_t& split0, uint8_t& split1)
{
    uint8_t split_index;
    if (is_quadrilateral_convex(pos_info, neg_info, split_index))
    {
        // If it is convex, then we split the quadrilateral with the diagonal that connects the
        // point that forms the largest angle.
float radians[4] = {
            calc_radian(pts[3], pts[0], pts[1]),
            calc_radian(pts[0], pts[1], pts[2]),
            calc_radian(pts[1], pts[2], pts[3]),
            calc_radian(pts[2], pts[3], pts[0])
        };
        uint8_t max_radian_index = 0;
        for (uint8_t i = 1; i < 4; ++i)
        {
            if (radians[i] > radians[max_radian_index]) max_radian_index = i;
        }
        split_index = max_radian_index;
        // split_index = (uint8_t)argmax(radian0, radian1, radian2, radian3);
    }
    // The split diagonal connects 'split_index' with its opposite corner.
    split0 = split_index;
    split1 = (split0 + 2) % 4; // pts.size();
}

// Convenience overload: computes the sign info from 'pts' first.
__device__ void find_quadrilateral_split(const float2* pts, uint8_t& split0, uint8_t& split1)
{
    uint8_t pos_info = 0x00, neg_info = 0x00;
    calc_quadrilateral_signs(pts, pos_info, neg_info);
    find_quadrilateral_split(pts, pos_info, neg_info, split0, split1);
}

// Fills 'iso_vertex_indices' with the global vertex indices of the four
// iso-vertices (one per voxel sharing 'edge'), visited in circular order.
// [precondition] all four sharing voxels are active and have an iso-vertex on
// this edge (asserted below).
__device__ void get_circular_vertices_by_edge(vertex_index_type* iso_vertex_indices,
                                              const voxel_edge_index_type edge, const uint3& index3D,
                                              const _VoxelInfo& vx_info,
                                              const _VoxelInfo* d_compact_voxel_info,
                                              const voxel_index1D_type* d_full_voxel_index_map,
                                              const uint3& num_voxels_dim)
{
    uint8_t iter = 0;
    for (auto circular_edge_iter : CircularEdgeRange(edge, vx_info.is_edge_ccw(edge)))
    {
        uint3 circular_index3D;
        voxel_edge_index_type circular_edge;
        circular_edge_iter.retrieve(circular_index3D, circular_edge, index3D);
        voxel_index1D_type circular_index1D = index3D_to_1D(circular_index3D, num_voxels_dim);
        assert(d_full_voxel_index_map[circular_index1D] != INVALID_INDEX_1D);
        const _VoxelInfo& circular_vx_info = d_compact_voxel_info[d_full_voxel_index_map[circular_index1D]];
        iso_vertex_m_type circular_iso_vertex_m = circular_vx_info.iso_vertex_m_by_edge(circular_edge);
        assert(circular_iso_vertex_m != NO_VERTEX);
        vertex_index_type circular_iso_vertex_index = circular_vx_info.iso_vertex_index(circular_iso_vertex_m);
        iso_vertex_indices[iter] = circular_iso_vertex_index;
        ++iter;
    }
}

// Barycentric point-in-triangle test. On return, (alpha, beta, gamma) are the
// barycentric coordinates of 'pt' with respect to triangle (p0, p1, p2);
// a small epsilon (1e-4) tolerates points on the triangle boundary.
// 'Vec' must support subtraction and dot().
template <typename Vec>
__device__ bool is_inside_triangle(const Vec& p0, const Vec& p1, const Vec& p2, const Vec& pt,
                                   float& alpha, float& beta, float& gamma)
{
    Vec v0(p1 - p0), v1(p2 - p0), v2(pt - p0);
    float d00 = dot(v0, v0);
    float d10 = dot(v1, v0);
    float d11 = dot(v1, v1);
    float d20 = dot(v2, v0);
    float d21 = dot(v2, v1);
    float denom_inv = d00 * d11 - d10 * d10;
    denom_inv = 1.0f / denom_inv;
    beta = (d11 * d20 - d10 * d21) * denom_inv;
    gamma = (d00 * d21 - d10 * d20) * denom_inv;
    alpha = 1.0f - beta - gamma;
    return (-1e-4 < beta) && (-1e-4 < gamma) && (beta + gamma < 1.0 + 1e-4);
}

// Re-positions each bipolar edge's intersection vertex onto the quadrilateral
// formed by the four surrounding iso-vertices (one smoothing pass). Uses
// dynamic shared memory: 4 float2 slots per thread for the projected
// quadrilateral; each thread only touches its own slots, so no __syncthreads()
// is required.
__global__ void smooth_edge_vertices(float3* d_vertices, const _VoxelInfo* d_compact_voxel_info,
                                     const unsigned compact_size,
                                     const voxel_index1D_type* d_full_voxel_index_map,
                                     const float3 xyz_min, const float3 xyz_range,
                                     const uint3 num_voxels_dim)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size)
        return;

    extern __shared__ float2 sh_projected_vertex_pos[];
    float2* projected_vertex_pos = sh_projected_vertex_pos + threadIdx.x * 4;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(vx_info.index1D(), num_voxels_dim);
    for (uint8_t edge_iter = 0; edge_iter < VOXEL_NUM_LOCAL_EDGES; ++edge_iter)
    {
        // voxel_edge_index_type edge = voxel_local_edges[edge_iter];
        voxel_edge_index_type edge = tex1Dfetch(voxel_local_edges_tex, edge_iter);
        if ((!vx_info.is_edge_bipolar(edge)) ||
            circular_edge_exceed_boundary(edge, index3D, num_voxels_dim))
        {
            continue;
        }
        // NOTE(review): initializing vertex_index_type slots with INVALID_UINT8
        // looks type-mismatched if vertex_index_type is wider than 8 bits — confirm.
        vertex_index_type iso_vertex_indices[4] = {INVALID_UINT8, INVALID_UINT8, INVALID_UINT8, INVALID_UINT8};
        get_circular_vertices_by_edge(iso_vertex_indices, edge, index3D, vx_info,
                                      d_compact_voxel_info, d_full_voxel_index_map, num_voxels_dim);
        project_vertices_by_shared_edge(projected_vertex_pos, edge, iso_vertex_indices, d_vertices);
        uint8_t pos_info = 0x00, neg_info = 0x00;
        calc_quadrilateral_signs(projected_vertex_pos, pos_info, neg_info);
        // Self-intersecting quadrilaterals cannot be split meaningfully — skip.
        if (is_quadrilateral_complex(pos_info, neg_info))
        {
            continue;
        }
        uint8_t split0 = INVALID_UINT8, split1 = INVALID_UINT8;
find_quadrilateral_split(projected_vertex_pos, pos_info, neg_info, split0, split1);
        // 2D position of the shared edge in the projection plane ("origin").
        float x1 = ijk_to_xyz(index3D.x + 1, num_voxels_dim.x, xyz_range.x, xyz_min.x);
        float y1 = ijk_to_xyz(index3D.y + 1, num_voxels_dim.y, xyz_range.y, xyz_min.y);
        float z1 = ijk_to_xyz(index3D.z + 1, num_voxels_dim.z, xyz_range.z, xyz_min.z);
        float2 origin;
        if (edge == 6) origin = make_float2(x1, y1);
        else if (edge == 9) origin = make_float2(x1, z1);
        else origin = make_float2(y1, z1);
        // Find which of the two split triangles contains the edge point and
        // re-interpolate the edge vertex with the barycentric weights.
        float alpha, beta, gamma;
        if (is_inside_triangle(projected_vertex_pos[split0], projected_vertex_pos[(split0 + 1) % 4],
                               projected_vertex_pos[split1], origin, alpha, beta, gamma))
        {
            float3& edge_vertex = d_vertices[vx_info.edge_vertex_index(edge)];
            edge_vertex = alpha * d_vertices[iso_vertex_indices[split0]];
            edge_vertex += beta * d_vertices[iso_vertex_indices[(split0 + 1) % 4]];
            edge_vertex += gamma * d_vertices[iso_vertex_indices[split1]];
        }
        else if (is_inside_triangle(projected_vertex_pos[split1], projected_vertex_pos[(split1 + 1) % 4],
                                    projected_vertex_pos[split0], origin, alpha, beta, gamma))
        {
            float3& edge_vertex = d_vertices[vx_info.edge_vertex_index(edge)];
            edge_vertex = alpha * d_vertices[iso_vertex_indices[split1]];
            edge_vertex += beta * d_vertices[iso_vertex_indices[(split1 + 1) % 4]];
            edge_vertex += gamma * d_vertices[iso_vertex_indices[split0]];
        }
    }
}

// Per-voxel triangle count: each interior bipolar local edge yields one
// quadrilateral = two triangles.
__global__ void calc_num_triangles_per_voxel_kern(unsigned* d_num_triangles,
                                                  const _VoxelInfo* d_compact_voxel_info,
                                                  const unsigned compact_size, const uint3 num_voxels_dim)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size)
        return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(vx_info.index1D(), num_voxels_dim);
    uint8_t vx_num_triangles = 0;
    for (uint8_t edge_iter = 0; edge_iter < VOXEL_NUM_LOCAL_EDGES; ++edge_iter)
    {
        // voxel_edge_index_type edge = voxel_local_edges[edge_iter];
        voxel_edge_index_type edge = tex1Dfetch(voxel_local_edges_tex, edge_iter);
        if ((!vx_info.is_edge_bipolar(edge)) ||
            circular_edge_exceed_boundary(edge, index3D, num_voxels_dim))
        {
            continue;
        }
        vx_num_triangles += 2;
    }
    d_num_triangles[compact_index] = (unsigned)vx_num_triangles;
}

// Generate the actual triangles information of the mesh: for every interior
// bipolar local edge, emit the two triangles of the surrounding iso-vertex
// quadrilateral, writing at the offset given by the triangle-count scan.
__global__ void generate_triangles_kern(uint3* d_triangles, const unsigned* d_triangles_scan,
                                        const _VoxelInfo* d_compact_voxel_info, const unsigned compact_size,
                                        const voxel_index1D_type* d_full_voxel_index_map,
                                        const uint3 num_voxels_dim)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size)
        return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(vx_info.index1D(), num_voxels_dim);
    unsigned vx_triangle_index = d_triangles_scan[compact_index];
    for (uint8_t edge_iter = 0; edge_iter < VOXEL_NUM_LOCAL_EDGES; ++edge_iter)
    {
        // voxel_edge_index_type edge = voxel_local_edges[edge_iter];
        voxel_edge_index_type edge = tex1Dfetch(voxel_local_edges_tex, edge_iter);
        if ((!vx_info.is_edge_bipolar(edge)) ||
            circular_edge_exceed_boundary(edge, index3D, num_voxels_dim))
        {
            continue;
        }
        vertex_index_type iso_vertex_indices[4] = {INVALID_UINT8, INVALID_UINT8, INVALID_UINT8, INVALID_UINT8};
        get_circular_vertices_by_edge(iso_vertex_indices, edge, index3D, vx_info,
                                      d_compact_voxel_info, d_full_voxel_index_map, num_voxels_dim);
        // Quadrilateral (0,1,2,3) -> triangles (0,1,2) and (2,3,0).
        uint3 triangle = make_uint3(iso_vertex_indices[0], iso_vertex_indices[1], iso_vertex_indices[2]);
        d_triangles[vx_triangle_index] = triangle;
        ++vx_triangle_index;
        triangle = make_uint3(iso_vertex_indices[2], iso_vertex_indices[3], iso_vertex_indices[0]);
        d_triangles[vx_triangle_index] = triangle;
        ++vx_triangle_index;
    }
}

// Voxel grid dims = scalar sample grid dims minus one along each axis.
inline void get_num_voxels_dim_from_scalar_grid(uint3& num_voxels_dim, const scalar_grid_type& h_scalar_grid)
{
    num_voxels_dim.x = h_scalar_grid.dim_x() - 1;
num_voxels_dim.y = h_scalar_grid.dim_y() - 1;
    num_voxels_dim.z = h_scalar_grid.dim_z() - 1;
}

// Unary functor for thrust: extracts the per-voxel vertex count as unsigned.
class _VoxelInfoToNumVerticesUniOp
{
public:
    typedef _VoxelInfo argument_type;
    typedef unsigned result_type;

    __device__ result_type operator()(const argument_type& vx_info) const
    {
        return (unsigned)(vx_info.num_vertices());
    }
};

// Top-level Dual Marching Cubes driver. Extracts the iso-surface of
// 'h_scalar_grid' at 'iso_value' over the box [xyz_min, xyz_max], running
// 'num_smooth' edge-vertex smoothing passes, and returns the mesh in
// 'vertices'/'triangles' (both cleared and overwritten).
// Pipeline: flag active voxels -> compact -> init/correct voxel info ->
// count & place vertices -> sample edge intersections -> place iso-vertices ->
// smooth -> count & emit triangles.
void run_dmc(std::vector<float3>& vertices, std::vector<uint3>& triangles,
             const scalar_grid_type& h_scalar_grid, const float3& xyz_min, const float3& xyz_max,
             float iso_value, unsigned num_smooth)
{
    uint3 num_voxels_dim;
    get_num_voxels_dim_from_scalar_grid(num_voxels_dim, h_scalar_grid);
    const size_t num_total_voxels = num_voxels_dim.x * num_voxels_dim.y * num_voxels_dim.z;

    // Upload the scalar field.
    float* d_scalar_grid;
    checkCudaErrors(hipMalloc(&d_scalar_grid, sizeof(float) * h_scalar_grid.size()));
    checkCudaErrors(hipMemcpy(d_scalar_grid, h_scalar_grid.data(), sizeof(float) * h_scalar_grid.size(),
                              hipMemcpyHostToDevice));

    // Flag voxels crossed by the iso-surface.
    flag_type* d_voxel_flags;
    checkCudaErrors(hipMalloc(&d_voxel_flags, sizeof(flag_type) * num_total_voxels));
    // NOTE(review): memset size uses sizeof(unsigned) but the allocation used
    // sizeof(flag_type) — confirm flag_type is unsigned, otherwise this is a
    // size mismatch.
    checkCudaErrors(hipMemset(d_voxel_flags, 0, sizeof(unsigned) * num_total_voxels));

    dim3 threads_dim3(16, 16, 1);
    dim3 blocks_dim3((num_voxels_dim.x + threads_dim3.x - 1) / threads_dim3.x,
                     (num_voxels_dim.y + threads_dim3.y - 1) / threads_dim3.y,
                     (num_voxels_dim.z + threads_dim3.z - 1) / threads_dim3.z);
    launch_flag_active_voxels(d_voxel_flags, d_scalar_grid, num_voxels_dim, iso_value,
                              blocks_dim3, threads_dim3);
    // print_d_arr(d_voxel_flags, num_total_voxels, "voxel flag: ");

    // Stream compaction: active voxel count, prefix scan, then compact.
    size_t num_compact_voxels = launch_thrust_count(d_voxel_flags, num_total_voxels);

    unsigned* d_voxel_flags_scan;
    checkCudaErrors(hipMalloc(&d_voxel_flags_scan, sizeof(unsigned) * num_total_voxels));
    checkCudaErrors(hipMemset(d_voxel_flags_scan, 0, sizeof(unsigned) * num_total_voxels));
    launch_thrust_scan(d_voxel_flags_scan, d_voxel_flags, num_total_voxels);
    // print_d_arr(d_voxel_flags_scan, num_total_voxels, "flags scan: ");

    // thrust::device_vector<_VoxelInfo> d_compact_voxel_info_vec(num_compact_voxels);
    _VoxelInfo* d_compact_voxel_info; // = thrust::raw_pointer_cast(d_compact_voxel_info_vec.data());
    checkCudaErrors(hipMalloc(&d_compact_voxel_info, sizeof(_VoxelInfo) * num_compact_voxels));
    checkCudaErrors(hipMemset(d_compact_voxel_info, 0xff, sizeof(_VoxelInfo) * num_compact_voxels));

    // Map full-grid 1D index -> compact index (0xff-filled == INVALID_INDEX_1D).
    voxel_index1D_type* d_full_voxel_index_map;
    checkCudaErrors(hipMalloc(&d_full_voxel_index_map, sizeof(voxel_index1D_type) * num_total_voxels));
    checkCudaErrors(hipMemset(d_full_voxel_index_map, 0xff, sizeof(voxel_index1D_type) * num_total_voxels));

    launch_compact_voxel_flags(d_compact_voxel_info, d_full_voxel_index_map, num_voxels_dim,
                               d_voxel_flags, d_voxel_flags_scan, num_total_voxels,
                               blocks_dim3, threads_dim3);
    // print_d_arr(d_full_voxel_index_map, num_total_voxels, "full voxel map: ");

    // Switch to a 1D launch over the compacted voxels; fold excess blocks into
    // the grid's y dimension to stay within the per-dimension block limit.
    threads_dim3 = dim3(128, 1, 1);
    blocks_dim3 = dim3((num_compact_voxels + 127) / 128, 1, 1);
    while (blocks_dim3.x > 32768)
    {
        blocks_dim3.x /= 2;
        blocks_dim3.y *= 2;
    }

    checkCudaErrors(hipFree(d_voxel_flags));
    checkCudaErrors(hipFree(d_voxel_flags_scan));

    launch_init_voxels_info(d_compact_voxel_info, num_compact_voxels, d_scalar_grid,
                            num_voxels_dim, iso_value, blocks_dim3, threads_dim3);
    launch_correct_voxels_info(d_compact_voxel_info, num_compact_voxels, d_full_voxel_index_map,
                               num_voxels_dim, blocks_dim3, threads_dim3);
    launch_calc_num_vertices_per_voxel(d_compact_voxel_info, num_compact_voxels,
                                       blocks_dim3, threads_dim3);
    // print_d_arr(d_compact_voxel_info, num_compact_voxels, "vx_info: ");

    // Total vertex count across all voxels (iso + edge-intersection vertices).
    unsigned num_vertices = thrust::transform_reduce(thrust::device, d_compact_voxel_info,
                                                     d_compact_voxel_info + num_compact_voxels,
                                                     _VoxelInfoToNumVerticesUniOp(), 0,
                                                     thrust::plus<unsigned>());

    unsigned* d_vertices_begin_scan;
    checkCudaErrors(hipMalloc(&d_vertices_begin_scan, sizeof(unsigned) * num_compact_voxels));
    checkCudaErrors(hipMemset(d_vertices_begin_scan, 0x00, sizeof(unsigned) * num_compact_voxels));
    launch_thrust_transform_scan(d_vertices_begin_scan, d_compact_voxel_info, num_compact_voxels,
                                 _VoxelInfoToNumVerticesUniOp());
    launch_set_vertices_begin(d_compact_voxel_info, d_vertices_begin_scan, num_compact_voxels,
                              blocks_dim3, threads_dim3);
    // print_d_arr(d_vertices_begin_scan, num_compact_voxels, "vertices begin: ");
    checkCudaErrors(hipFree(d_vertices_begin_scan));
    // print_d_arr(d_compact_voxel_info, num_compact_voxels, "vx_info: ");

    float3* d_vertices;
    checkCudaErrors(hipMalloc(&d_vertices, sizeof(float3) * num_vertices));

    float3 xyz_range = xyz_max - xyz_min;
    hipLaunchKernelGGL(( sample_edge_intersection_vertices_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0, d_vertices, d_compact_voxel_info,
                                                              num_compact_voxels, d_scalar_grid,
                                                              num_voxels_dim, xyz_min, xyz_range, iso_value);
    // NOTE(review): freeing right after an async launch relies on hipFree
    // synchronizing the device — confirm, or add an explicit sync first.
    checkCudaErrors(hipFree(d_scalar_grid));

    // Dynamic shared memory: one _VoxelInfo per thread.
    hipLaunchKernelGGL(( calc_iso_vertices_kern), dim3(blocks_dim3), dim3(threads_dim3), threads_dim3.x * sizeof(_VoxelInfo), 0, d_vertices, d_compact_voxel_info, num_compact_voxels,
        d_full_voxel_index_map, num_voxels_dim);

    // Alternate smoothing of edge vertices and re-averaging of iso-vertices.
    for (unsigned smooth_iter = 0; smooth_iter < num_smooth; ++ smooth_iter)
    {
        // Dynamic shared memory: 4 float2 per thread.
        hipLaunchKernelGGL(( smooth_edge_vertices), dim3(blocks_dim3), dim3(threads_dim3), threads_dim3.x * sizeof(float2) * 4, 0, d_vertices, d_compact_voxel_info, num_compact_voxels,
            d_full_voxel_index_map, xyz_min, xyz_range, num_voxels_dim);
        hipDeviceSynchronize();
        checkCudaErrors(hipGetLastError());

        hipLaunchKernelGGL(( calc_iso_vertices_kern), dim3(blocks_dim3), dim3(threads_dim3), threads_dim3.x * sizeof(_VoxelInfo), 0, d_vertices, d_compact_voxel_info, num_compact_voxels,
            d_full_voxel_index_map, num_voxels_dim);
        hipDeviceSynchronize();
        checkCudaErrors(hipGetLastError());
        std::cout << "done for smooth iteration: " << smooth_iter << std::endl;
    }
    // print_d_arr(d_vertices, num_vertices, "all vertices: ");

    unsigned* d_num_triangles;
    checkCudaErrors(hipMalloc(&d_num_triangles, sizeof(unsigned) * num_compact_voxels));
    checkCudaErrors(hipMemset(d_num_triangles, 0, sizeof(unsigned) * num_compact_voxels));
    hipLaunchKernelGGL(( calc_num_triangles_per_voxel_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0, d_num_triangles, d_compact_voxel_info,
                                                                      num_compact_voxels, num_voxels_dim);
    // NOTE(review): entries of d_num_triangles are 0/2/4/6 — for this total to
    // be correct, launch_thrust_count must SUM the entries (as it does for 0/1
    // flags), not count the nonzero ones; confirm its semantics.
    size_t num_triangles = launch_thrust_count(d_num_triangles, num_compact_voxels);

    unsigned* d_triangles_scan;
    checkCudaErrors(hipMalloc(&d_triangles_scan, sizeof(unsigned) * num_compact_voxels));
    checkCudaErrors(hipMemset(d_triangles_scan, 0, sizeof(unsigned) * num_compact_voxels));
    launch_thrust_scan(d_triangles_scan, d_num_triangles, num_compact_voxels);

    checkCudaErrors(hipFree(d_num_triangles));

    uint3* d_triangles;
    checkCudaErrors(hipMalloc(&d_triangles, sizeof(uint3) * num_triangles));
    checkCudaErrors(hipMemset(d_triangles, 0xff, sizeof(uint3) * num_triangles));
    hipLaunchKernelGGL(( generate_triangles_kern), dim3(blocks_dim3), dim3(threads_dim3), 0, 0, d_triangles, d_triangles_scan,
                                                            d_compact_voxel_info, num_compact_voxels,
                                                            d_full_voxel_index_map, num_voxels_dim);
    // print_d_arr(d_triangles, num_triangles, "all triangles: ");

    checkCudaErrors(hipFree(d_compact_voxel_info));
    checkCudaErrors(hipFree(d_full_voxel_index_map));
    checkCudaErrors(hipFree(d_triangles_scan));

    // Copy the finished mesh back to the host.
    vertices.clear();
    triangles.clear();

    vertices.resize(num_vertices);
    checkCudaErrors(hipMemcpy(vertices.data(), d_vertices, sizeof(float3) * num_vertices,
                              hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_vertices));

    triangles.resize(num_triangles);
    checkCudaErrors(hipMemcpy(triangles.data(), d_triangles, sizeof(uint3) * num_triangles,
                              hipMemcpyDeviceToHost));
    checkCudaErrors(hipFree(d_triangles));

    std::cout << "Dual Marching Cubes done!" << std::endl;
}

}; // namespace dmc

/*
class Isosurface
{
public:
    virtual ~Isosurface() = default;
    virtual float value(float x, float y, float z) const = 0;
};

class SphereSurface : public Isosurface
{
public:
    float value(float x, float y, float z) const override
    {
        return sqrtf(x * x + y * y + z * z);
    }
};

class GyroidSurface : public Isosurface
{
public:
    float value(float x, float y, float z) const override
    {
        return 2.0 * (cosf(x) * sinf(y) + cosf(y) * sinf(z) + cosf(z) * sinf(x));
    }
};

void dump_obj(const char* filename, const std::vector<float3>& compact_vertices,
              const std::vector<uint3>& compact_triangles)
{
    std::ofstream of(filename);
    for (const auto& v : compact_vertices)
        of << "v " << v.x << " " << v.y << " " << v.z << std::endl;
    for (const auto& t: compact_triangles)
        of << "f " << t.x + 1 << " " << t.y + 1 << " " << t.z + 1 << std::endl;
}

void test_dmc()
{
    using namespace utils;
    using namespace dmc;

    iso_vertex_m_type* d_config_edge_lut1, * d_config_edge_lut2;
    uint8_t* d_num_vertex_lut1, * d_num_vertex_lut2;
    voxel_config_type* d_config_2B_3B_lut;
    voxel_face_index_type* d_config_2B_3B_ambiguous_face;
    voxel_face_index_type* d_opposite_face_lut;
    check_dir_type* d_face_to_check_dir_lut;
    uint8_t* d_edge_belonged_voxel_lut;
    voxel_edge_index_type* d_circular_edge_lut;
    voxel_edge_index_type* d_voxel_local_edges;

    setup_device_luts(&d_config_edge_lut1, &d_config_edge_lut2, &d_num_vertex_lut1, &d_num_vertex_lut2,
                      &d_config_2B_3B_lut, &d_config_2B_3B_ambiguous_face, &d_opposite_face_lut,
                      &d_face_to_check_dir_lut, &d_edge_belonged_voxel_lut, &d_circular_edge_lut,
                      &d_voxel_local_edges);

    SphereSurface surface;
    // GyroidSurface surface;
    float3 xyz_min = make_float3(-5, -5, -5);
    float3 xyz_max = make_float3(5, 5, 5);
    float3 xyz_range = xyz_max - xyz_min;
    float iso_value = 4.1f;

    unsigned resolution = 20;
    Array3D<float> scalar_grid(resolution + 1, resolution + 1, resolution + 1);
    for (unsigned k = 0; k < scalar_grid.dim_z(); ++k)
    {
        float z = ijk_to_xyz(k, resolution,
xyz_range.z, xyz_min.z); for (unsigned j = 0; j < scalar_grid.dim_y(); ++j) { float y = ijk_to_xyz(j, resolution, xyz_range.y, xyz_min.y); for (unsigned i = 0; i < scalar_grid.dim_x(); ++i) { float x = ijk_to_xyz(i, resolution, xyz_range.x, xyz_min.x); scalar_grid(i, j, k) = surface.value(x, y, z); } } } std::vector<float3> compact_vertices; std::vector<uint3> compact_triangles; dmc::run_dmc(compact_vertices, compact_triangles, scalar_grid, xyz_min, xyz_max, iso_value, 15); dump_obj("sphere.obj", compact_vertices, compact_triangles); for (const auto& vertex : compact_vertices) { std::cout << "v " << vertex.x << " " << vertex.y << " " << vertex.z << std::endl; } for (const auto& tri : compact_triangles) { std::cout << "f " << tri.x+1 << " " << tri.y+1 << " " << tri.z+1 << std::endl; } } int main() { test_dmc(); return 0; } */
715d2ad002115ed4e04fbaf4fd028d6725a6ba24.cu
#include <cassert>
#include <cmath>
#include <algorithm>
#include <memory>   // std::unique_ptr
#include <iostream>
#include <string>   // std::string (was only available transitively before)

// FIX: use '/' in #include directives. Backslashes in header names are
// implementation-defined per the C++ standard and only resolve on Windows
// toolchains; forward slashes work everywhere.
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/count.h>

#include "cuda_texture_types.h"      // texture
#include "texture_fetch_functions.h" // tex1Dfetch

#include "helper_math.hpp"
#include "dmc.hpp"

// Debug helper: copy `size` elements of device array `d_in` to the host and
// print them one per line ("[i] value"), preceded by `prefix` on its own line.
//
// Preconditions: d_in points to at least `size` valid elements of device
// memory; T is trivially copyable and has an operator<< overload.
// The cudaMemcpy is synchronous, so any pending kernels writing d_in on the
// default stream complete before the copy. Debugging only — not for hot paths.
template <typename T>
void print_d_arr(const T* d_in, unsigned size, const std::string& prefix)
{
    // unique_ptr instead of raw new[]/delete[]: the buffer is released even if
    // operator<< or the error-check path throws/exits abnormally.
    std::unique_ptr<T[]> h_in(new T[size]);
    checkCudaErrors(cudaMemcpy(h_in.get(), d_in, sizeof(T) * size,
                               cudaMemcpyDeviceToHost));

    std::cout << prefix << std::endl;
    for (unsigned i = 0; i < size; ++i)
    {
        std::cout << "[" << i << "] " << h_in[i] << std::endl;
    }
}

namespace dmc
{

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

using namespace utils;

// For each voxel config and an edge index, return the associated iso vertex in DMC.
// This is LUT 1. Voxel with 3B config with its adjacent voxel being 2B config CANNOT use this LUT.
const iso_vertex_m_type config_edge_lut1[NUM_CONFIGS][VOXEL_NUM_EDGES] = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0, 0, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 1, 0, 0, 1, 1, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0, 1, 1, 1, 1, 0xff, 0xff, 1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 
0, 0, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff }, { 1, 1, 0, 0, 0, 1, 0, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0, 0, 0xff, 0xff }, { 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 1, 0, 1, 1, 1, 1, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 1, 1, 0xff, 0, 1, 0xff, 0xff, 1, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0, 1, 1, 0, 0, 0xff, 0xff, 1, 0xff, 1, 0xff, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0, 0, 1, 1, 0xff, 1, 0xff, 1 }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0, 0xff, 0 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 
1, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0, 0xff }, { 0, 1, 1, 0, 0xff, 0, 1, 0, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 1, 1, 0, 1, 0xff, 1, 1, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 1, 0, 0xff, 1, 1, 0, 0 }, { 0xff, 1, 0xff, 1, 0xff, 0, 1, 0xff, 0, 0, 1, 1 }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0, 0 }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 1, 1 }, { 1, 0xff, 1, 0xff, 1, 0, 0xff, 0xff, 0, 0, 1, 1 }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0, 0 }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 1, 0, 0, 1, 1 }, { 1, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 1, 1, 0, 0 }, { 0, 1, 1, 0, 0, 2, 1, 3, 2, 2, 3, 3 }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 2, 1, 1, 2, 2 }, { 0xff, 1, 0xff, 1, 1, 0xff, 0xff, 0, 1, 1, 0, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0, 0, 1, 1 }, { 0, 0xff, 0xff, 0, 0, 2, 0xff, 1, 2, 2, 1, 1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 1, 1, 1, 0, 0xff, 0xff, 1, 0xff, 1, 0xff }, { 
0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0, 0, 0xff, 0, 0xff }, { 1, 0xff, 1, 0xff, 0, 1, 1, 0, 0, 0xff, 0, 0xff }, { 1, 0, 0, 1, 0xff, 0xff, 0, 1, 1, 0xff, 1, 0xff }, { 0xff, 1, 1, 0xff, 0, 0xff, 1, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0xff, 0xff, 0, 1, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 1, 1, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 1, 1, 1, 1, 1, 0, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 1, 0xff, 0xff, 1, 1 }, { 1, 2, 2, 1, 1, 0xff, 2, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0 }, { 0, 0, 1, 1, 0, 0xff, 0, 1, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0, 0, 1, 1, 1, 0, 0xff, 0xff, 1, 1 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0, 0, 0xff, 0xff, 1, 0, 1, 0xff, 1, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 1, 0, 1, 0xff, 1, 0xff }, { 0xff, 0xff, 1, 1, 0xff, 0, 0, 1, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 1, 1, 0, 0, 1, 0xff, 0xff, 1, 0xff, 1, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0 }, { 0, 0xff, 0xff, 0, 1, 0, 0xff, 0, 1, 0, 0, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0 }, { 0xff, 1, 0xff, 1, 0, 0xff, 0xff, 1, 0, 1, 1, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0, 1, 0, 0, 1, 1, 0 }, { 0, 0, 1, 1, 3, 0, 2, 1, 3, 2, 2, 3 }, { 1, 0xff, 1, 0xff, 0xff, 0xff, 0, 1, 1, 0, 0, 1 }, { 0xff, 0xff, 0, 0, 2, 0xff, 1, 0, 2, 1, 1, 2 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0 }, { 1, 0xff, 1, 0xff, 0, 1, 0xff, 0xff, 0, 1, 1, 0 }, { 0, 1, 1, 0, 0xff, 0xff, 0xff, 0xff, 0, 1, 1, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 0, 1 }, { 0xff, 1, 0xff, 1, 
0xff, 1, 0, 0xff, 1, 0, 0, 1 }, { 0, 0, 0xff, 0xff, 1, 0, 2, 0xff, 1, 2, 2, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0 }, { 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 0xff, 1, 0, 0, 1 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 1, 1, 0xff, 1, 1, 0, 1, 0xff, 0, 0, 0xff }, { 2, 2, 1, 1, 0xff, 2, 0, 1, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 1, 0xff, 0xff, 1, 1, 0xff }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 1, 0xff, 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0 }, { 1, 1, 0, 0, 1, 0xff, 0xff, 0, 0xff, 1, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0, 0xff, 1, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 1, 0, 0, 1, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 1 }, { 0xff, 1, 1, 0xff, 0, 0, 1, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 
0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 1, 1, 0xff, 0xff, 1, 0, 1, 1, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 1, 0, 0, 1, 1, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 1, 0, 0, 1, 1, 2, 0, 0xff, 2, 2, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0 }, { 1, 0xff, 0xff, 1, 0, 1, 1, 1, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 1, 0xff, 0, 0, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 2, 2, 0, 0, 1, 2, 0xff, 0, 1, 0xff, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 1, 0, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0, 1, 1, 0, 0xff, 0xff, 1, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 0xff, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 
0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }; // Number of iso vertices for DMC for each voxel config, this is LUT_1. // Voxel with 3B config with its adjacent voxel being 2B config CANNOT use this LUT. 
const uint8_t num_vertex_lut1[NUM_CONFIGS] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 2, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 2, 2, 4, 3, 2, 2, 3, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 3, 2, 2, 2, 2, 1, 1, 1, 2, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 2, 1, 1, 1, 2, 1, 1, 2, 1, 2, 2, 4, 2, 3, 1, 2, 2, 2, 2, 3, 2, 2, 1, 1, 1, 1, 2, 3, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 3, 2, 1, 1, 2, 1, 1, 2, 1, 2, 1, 3, 1, 2, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 0 }; const iso_vertex_m_type config_edge_lut2[NUM_CONFIGS][VOXEL_NUM_EDGES] = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0xff, 
0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0, 1, 1, 0, 0xff, 0xff, 1, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 1, 0, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 1, 0, 0, 1, 1, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 1, 0xff, 0, 0, 1, 0xff, 0xff, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0, 1, 1, 1, 1, 0xff, 0xff, 1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff, 0xff }, { 1, 1, 0, 0, 0, 1, 0, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 1, 0, 0, 1, 1, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 1, 0, 1, 1, 1, 1, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 
0xff, 0xff, 0, 0xff, 0 }, { 0xff, 1, 1, 0xff, 0, 0, 1, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 1, 1, 0xff, 0, 1, 0xff, 0xff, 1, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0, 0xff, 1, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0, 1, 1, 0, 0, 0xff, 0xff, 1, 0xff, 1, 0xff, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0, 0, 1, 1, 0xff, 1, 0xff, 1 }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0, 0xff, 0 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0xff }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 1, 0xff, 0xff, 1, 1, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0, 1, 1, 0, 0xff, 0, 1, 0, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 1, 1, 0, 1, 0xff, 1, 1, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 0xff, 1, 0, 0, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0 }, { 0, 0, 0xff, 0xff, 0, 1, 0, 0xff, 1, 1, 0, 0 }, { 0xff, 1, 0xff, 1, 0xff, 0, 1, 0xff, 0, 0, 1, 1 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 0, 1 }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 1, 1 }, { 1, 0xff, 1, 0xff, 1, 0, 0xff, 0xff, 0, 0, 1, 
1 }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0, 0 }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 1, 0, 0, 1, 1 }, { 1, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 1, 1, 0, 0 }, { 0, 1, 1, 0, 0, 2, 1, 3, 2, 2, 3, 3 }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 2, 1, 1, 2, 2 }, { 0xff, 1, 0xff, 1, 1, 0xff, 0xff, 0, 1, 1, 0, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0, 0, 1, 1 }, { 0, 0xff, 0xff, 0, 0, 2, 0xff, 1, 2, 2, 1, 1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 0, 1, 1, 1, 0, 0xff, 0xff, 1, 0xff, 1, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 1, 1, 0xff, 0, 0, 1, 0, 0xff, 0, 0xff }, { 1, 0xff, 1, 0xff, 0, 1, 1, 0, 0, 0xff, 0, 0xff }, { 1, 0, 0, 1, 0xff, 0xff, 0, 1, 1, 0xff, 1, 0xff }, { 0xff, 1, 1, 0xff, 0, 0xff, 1, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0xff, 0xff, 0, 1, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 1, 1, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 1, 1, 1, 1, 1, 0, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 1, 0xff, 0xff, 1, 1 }, { 1, 2, 2, 1, 1, 0xff, 
2, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 1, 0xff, 0xff, 1, 1, 0xff, 0xff, 0, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 1, 0xff, 0xff, 1, 1 }, { 0, 0, 1, 1, 0, 0xff, 0, 1, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 1, 0xff, 0xff, 1, 1 }, { 0xff, 0xff, 0, 0, 1, 1, 1, 0, 0xff, 0xff, 1, 1 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 1, 1, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0xff, 0xff, 0, 1, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff }, { 0xff, 1, 1, 0xff, 0, 0xff, 1, 0, 0, 0xff, 0, 0xff }, { 1, 1, 0, 0, 0xff, 0xff, 1, 0, 1, 0xff, 1, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 1, 0, 1, 0xff, 1, 0xff }, { 0xff, 0xff, 1, 1, 0xff, 0, 0, 1, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0, 1, 1, 0, 0, 1, 0xff, 0xff, 1, 0xff, 1, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 
0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0, 0 }, { 0, 0xff, 0xff, 0, 1, 0, 0xff, 0, 1, 0, 0, 1 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 1, 0, 0, 1, 1 }, { 0xff, 1, 0xff, 1, 0, 0xff, 0xff, 1, 0, 1, 1, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0, 1, 0, 0, 1, 1, 0 }, { 0, 0, 1, 1, 3, 0, 2, 1, 3, 2, 2, 3 }, { 1, 0xff, 1, 0xff, 0xff, 0xff, 0, 1, 1, 0, 0, 1 }, { 0xff, 0xff, 0, 0, 2, 0xff, 1, 0, 2, 1, 1, 2 }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0, 0 }, { 1, 0xff, 1, 0xff, 0, 1, 0xff, 0xff, 0, 1, 1, 0 }, { 0, 1, 1, 0, 0xff, 0xff, 0xff, 0xff, 0, 1, 1, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 0xff, 1, 0, 0, 1 }, { 0xff, 1, 0xff, 1, 0xff, 1, 0, 0xff, 1, 0, 0, 1 }, { 0, 0, 0xff, 0xff, 1, 0, 2, 0xff, 1, 2, 2, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 1, 0xff, 0, 1, 1, 0 }, { 0xff, 0xff, 0xff, 0xff, 1, 0xff, 0, 0xff, 1, 0, 0, 1 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0, 0, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff }, { 0xff, 1, 1, 0xff, 1, 1, 0, 1, 0xff, 0, 0, 0xff }, { 2, 2, 1, 1, 0xff, 2, 0, 1, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 1, 0xff, 0xff, 1, 1, 0xff }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0xff, 0xff, 0, 0, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0xff, 1, 1, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0, 0xff, 0 
}, { 1, 0xff, 0xff, 1, 1, 0xff, 0, 0, 0xff, 0, 0xff, 0 }, { 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0, 0xff, 0 }, { 0xff, 1, 0xff, 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0xff, 0 }, { 1, 1, 0, 0, 1, 0xff, 0xff, 0, 0xff, 1, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0, 0xff, 1, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0 }, { 1, 0, 0, 1, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 1 }, { 0xff, 1, 1, 0xff, 0, 0, 1, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0, 0xff, 0xff, 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff }, { 1, 1, 0xff, 0xff, 1, 0, 1, 1, 0, 0, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 1, 0, 0, 1, 1, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0xff, 0xff, 1, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 1, 0xff, 0, 1, 1, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff }, { 1, 0, 0, 1, 1, 2, 0, 0xff, 2, 2, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 1, 0, 0xff, 1, 1, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 1, 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0 }, { 1, 0xff, 0xff, 1, 0, 1, 1, 1, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0xff, 0, 1, 0xff, 0, 
0, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0 }, { 2, 2, 0, 0, 1, 2, 0xff, 0, 1, 0xff, 0xff, 1 }, { 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 1, 1, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0xff, 0, 0xff, 1, 0, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0, 1, 1, 0, 0xff, 0xff, 1, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0, 0, 0xff, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 1 }, { 0xff, 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0, 0, 0xff, 0xff, 1, 0, 0xff, 0xff, 1, 0xff, 0xff, 1 }, { 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0 }, { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0, 0, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 1, 1, 0xff, 0, 0xff, 1, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 1, 0, 0, 1, 1, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0, 0xff, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }; const uint8_t num_vertex_lut2[NUM_CONFIGS] = { 0, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 
2, 1, 2, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 3, 2, 2, 3, 2, 1, 2, 1, 1, 1, 2, 1, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 3, 2, 2, 2, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 1, 1, 2, 1, 1, 1, 2, 1, 2, 2, 2, 2, 2, 4, 2, 3, 2, 2, 2, 2, 2, 3, 2, 2, 1, 1, 1, 1, 2, 3, 2, 2, 1, 1, 2, 1, 2, 2, 2, 1, 1, 2, 2, 2, 1, 2, 1, 2, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 1, 1, 3, 2, 1, 1, 2, 1, 1, 2, 1, 2, 1, 3, 1, 2, 1, 2, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 0 }; const unsigned NUM_AMBIGUOUS_CONFIGS = 36; const voxel_config_type config_2B_3B_lut[NUM_AMBIGUOUS_CONFIGS] = { 0xa0, 0x21, 0x42, 0x84, 0x05, 0x81, 0x48, 0x0a, 0x50, 0x12, 0x18, 0x24, // 2B 0xc1, 0xc2, 0x83, 0x45, 0x86, 0x49, 0x8a, 0x51, 0x92, 0x43, 0x54, 0x15, // 3B 0x16, 0x1c, 0x61, 0xa2, 0xa8, 0x29, 0x2a, 0x2c, 0x68, 0x34, 0x38, 0x94 }; const voxel_face_index_type config_2B_3B_ambiguous_face[NUM_AMBIGUOUS_CONFIGS] = { 5, 1, 2, 3, 0, 4, 3, 0, 5, 1, 4, 2, // 2B 4, 2, 4, 0, 3, 3, 0, 5, 1, 2, 5, 0, // 3B 1, 4, 1, 5, 5, 1, 0, 2, 3, 2, 4, 3 }; const voxel_face_index_type opposite_face_lut[VOXEL_NUM_FACES] = {5, 3, 4, 1, 2, 0}; const check_dir_type POS_X_DIR = 0; const check_dir_type NEG_X_DIR = 1; const check_dir_type POS_Y_DIR = 2; const check_dir_type NEG_Y_DIR = 3; const check_dir_type POS_Z_DIR = 4; const check_dir_type NEG_Z_DIR = 5; const check_dir_type face_to_check_dir_lut[VOXEL_NUM_FACES] = { NEG_Z_DIR, NEG_Y_DIR, POS_X_DIR, POS_Y_DIR, NEG_X_DIR, POS_Z_DIR }; const uint8_t LOCAL_EDGE_ENTRY = 0xff; const uint8_t edge_belonged_voxel_lut[VOXEL_NUM_EDGES] = { ( 0x00 | 0x40 | 0x20 ) | 10, // 0 ( 0x00 | 0x00 | 0x20 ) | 9, // 1 ( 0x00 | 0x00 | 0x20 ) | 10, // 2 ( 0x80 | 0x00 | 0x20 ) | 9, // 3 ( 0x80 | 0x40 | 0x00 ) | 6, // 4 ( 0x00 | 0x40 | 0x00 ) | 6, // 5 LOCAL_EDGE_ENTRY, // 6 ( 0x80 | 0x00 | 0x00 ) | 6, // 7 ( 0x00 | 0x40 | 0x00 ) | 10, // 8 LOCAL_EDGE_ENTRY, // 9 LOCAL_EDGE_ENTRY, // 10 ( 0x80 | 0x00 | 
0x00 ) | 9, // 11 }; // Same edge shared by four voxels. Default in CCW order when looking align the positive // direction of the axis. voxel_edge_index_type circular_edge_lut[3][4] = { {6, 7, 4, 5}, {9, 1, 3, 11}, {10, 8, 0, 2} }; const uint8_t VOXEL_NUM_LOCAL_EDGES = 3; voxel_edge_index_type voxel_local_edges[VOXEL_NUM_LOCAL_EDGES] = {6, 9, 10}; // LUT on device memory texture<iso_vertex_m_type, cudaTextureType1D, cudaReadModeElementType> config_edge_lut1_tex; texture<iso_vertex_m_type, cudaTextureType1D, cudaReadModeElementType> config_edge_lut2_tex; texture<uint8_t, cudaTextureType1D, cudaReadModeElementType> num_vertex_lut1_tex; texture<uint8_t, cudaTextureType1D, cudaReadModeElementType> num_vertex_lut2_tex; texture<voxel_config_type, cudaTextureType1D, cudaReadModeElementType> config_2B_3B_lut_tex; texture<voxel_face_index_type, cudaTextureType1D, cudaReadModeElementType> config_2B_3B_ambiguous_face_tex; texture<voxel_face_index_type, cudaTextureType1D, cudaReadModeElementType> opposite_face_lut_tex; texture<check_dir_type, cudaTextureType1D, cudaReadModeElementType> face_to_check_dir_lut_tex; texture<uint8_t, cudaTextureType1D, cudaReadModeElementType> edge_belonged_voxel_lut_tex; texture<voxel_edge_index_type, cudaTextureType1D, cudaReadModeElementType> circular_edge_lut_tex; texture<voxel_edge_index_type, cudaTextureType1D, cudaReadModeElementType> voxel_local_edges_tex; // A singleton class to hold all the device pointers needed by static LUTs. Saves trouble // for maintaining these pointers on the client side. 
class LutPtrsCollection {
private:
    // Lazily-created singleton instance; owned by the unique_ptr so it is
    // destroyed automatically at program exit.
    static std::unique_ptr<LutPtrsCollection> m_instance;

public:
    // Returns the process-wide instance, creating it on first use.
    // NOTE(review): creation is not thread-safe; assumes the first call happens
    // from a single host thread -- confirm against callers.
    static LutPtrsCollection* instance() {
        if (!m_instance) {
            m_instance = std::unique_ptr<LutPtrsCollection>(new LutPtrsCollection);
        }
        return m_instance.get();
    }

    // Device-side copies of the static LUTs. Allocated by setup_device_luts()
    // and released by cleanup_device_luts(); held as raw (non-owning) pointers.
    iso_vertex_m_type* d_config_edge_lut1;
    iso_vertex_m_type* d_config_edge_lut2;
    uint8_t* d_num_vertex_lut1;
    uint8_t* d_num_vertex_lut2;
    voxel_config_type* d_config_2B_3B_lut;
    voxel_face_index_type* d_config_2B_3B_ambiguous_face;
    voxel_face_index_type* d_opposite_face_lut;
    check_dir_type* d_face_to_check_dir_lut;
    uint8_t* d_edge_belonged_voxel_lut;
    voxel_edge_index_type* d_circular_edge_lut;
    voxel_edge_index_type* d_voxel_local_edges;
};

std::unique_ptr<LutPtrsCollection> LutPtrsCollection::m_instance = nullptr;

// Convenience overload: uploads every host LUT to the device and stores the
// resulting device pointers inside the LutPtrsCollection singleton.
void setup_device_luts() {
    LutPtrsCollection* luts = LutPtrsCollection::instance();
    setup_device_luts(&(luts->d_config_edge_lut1), &(luts->d_config_edge_lut2),
                      &(luts->d_num_vertex_lut1), &(luts->d_num_vertex_lut2),
                      &(luts->d_config_2B_3B_lut),
                      &(luts->d_config_2B_3B_ambiguous_face),
                      &(luts->d_opposite_face_lut),
                      &(luts->d_face_to_check_dir_lut),
                      &(luts->d_edge_belonged_voxel_lut),
                      &(luts->d_circular_edge_lut),
                      &(luts->d_voxel_local_edges));
}

// Allocates device memory for each static LUT, copies the host data over, and
// binds each buffer to its texture reference so kernels can read it through
// tex1Dfetch. Each out-parameter receives the corresponding device pointer;
// the caller must later release them via cleanup_device_luts().
// NOTE(review): the texture-reference API (cudaBindTexture / tex1Dfetch) is
// deprecated and removed in CUDA 12+; newer toolkits require texture objects
// (or plain const __restrict__ loads) instead.
void setup_device_luts(iso_vertex_m_type** d_config_edge_lut1,
                       iso_vertex_m_type** d_config_edge_lut2,
                       uint8_t** d_num_vertex_lut1,
                       uint8_t** d_num_vertex_lut2,
                       voxel_config_type** d_config_2B_3B_lut,
                       voxel_face_index_type** d_config_2B_3B_ambiguous_face,
                       voxel_face_index_type** d_opposite_face_lut,
                       check_dir_type** d_face_to_check_dir_lut,
                       uint8_t** d_edge_belonged_voxel_lut,
                       voxel_edge_index_type** d_circular_edge_lut,
                       voxel_edge_index_type** d_voxel_local_edges) {
    // All LUT elements are 8-bit unsigned, so one channel descriptor serves
    // every texture bound below.
    const cudaChannelFormatDesc channel_desc =
        cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
    // setup for d_config_edge_lut1 2D array (flattened NUM_CONFIGS x VOXEL_NUM_EDGES)
    // NOTE(review): sizes use sizeof(voxel_config_type) although the buffer is
    // typed iso_vertex_m_type -- presumably both are 1 byte; confirm.
    checkCudaErrors(cudaMalloc(d_config_edge_lut1,
                               sizeof(voxel_config_type) * NUM_CONFIGS * VOXEL_NUM_EDGES));
    checkCudaErrors(cudaMemcpy(*d_config_edge_lut1, (voxel_config_type*)(*config_edge_lut1),
                               sizeof(voxel_config_type) * NUM_CONFIGS * VOXEL_NUM_EDGES,
                               cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, config_edge_lut1_tex, *d_config_edge_lut1, channel_desc));
    // setup for d_config_edge_lut2 2D array
    checkCudaErrors(cudaMalloc(d_config_edge_lut2,
                               sizeof(voxel_config_type) * NUM_CONFIGS * VOXEL_NUM_EDGES));
    checkCudaErrors(cudaMemcpy(*d_config_edge_lut2, (voxel_config_type*)(*config_edge_lut2),
                               sizeof(voxel_config_type) * NUM_CONFIGS * VOXEL_NUM_EDGES,
                               cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, config_edge_lut2_tex, *d_config_edge_lut2, channel_desc));
    // setup for d_num_vertex_lut1
    checkCudaErrors(cudaMalloc(d_num_vertex_lut1, sizeof(uint8_t) * NUM_CONFIGS));
    checkCudaErrors(cudaMemcpy(*d_num_vertex_lut1, num_vertex_lut1,
                               sizeof(uint8_t) * NUM_CONFIGS, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, num_vertex_lut1_tex, *d_num_vertex_lut1, channel_desc));
    // setup for d_num_vertex_lut2
    checkCudaErrors(cudaMalloc(d_num_vertex_lut2, sizeof(uint8_t) * NUM_CONFIGS));
    checkCudaErrors(cudaMemcpy(*d_num_vertex_lut2, num_vertex_lut2,
                               sizeof(uint8_t) * NUM_CONFIGS, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, num_vertex_lut2_tex, *d_num_vertex_lut2, channel_desc));
    // setup for d_config_2B_3B_lut
    checkCudaErrors(cudaMalloc(d_config_2B_3B_lut,
                               sizeof(voxel_config_type) * NUM_AMBIGUOUS_CONFIGS));
    checkCudaErrors(cudaMemcpy(*d_config_2B_3B_lut, config_2B_3B_lut,
                               sizeof(voxel_config_type) * NUM_AMBIGUOUS_CONFIGS,
                               cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, config_2B_3B_lut_tex, *d_config_2B_3B_lut, channel_desc));
    // setup for d_config_2B_3B_ambiguous_face
    checkCudaErrors(cudaMalloc(d_config_2B_3B_ambiguous_face,
                               sizeof(voxel_face_index_type) * NUM_AMBIGUOUS_CONFIGS));
    checkCudaErrors(cudaMemcpy(*d_config_2B_3B_ambiguous_face, config_2B_3B_ambiguous_face,
                               sizeof(voxel_face_index_type) * NUM_AMBIGUOUS_CONFIGS,
                               cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, config_2B_3B_ambiguous_face_tex,
                                    *d_config_2B_3B_ambiguous_face, channel_desc));
    // setup for d_opposite_face_lut
    checkCudaErrors(cudaMalloc(d_opposite_face_lut,
                               sizeof(voxel_face_index_type) * VOXEL_NUM_FACES));
    checkCudaErrors(cudaMemcpy(*d_opposite_face_lut, opposite_face_lut,
                               sizeof(voxel_face_index_type) * VOXEL_NUM_FACES,
                               cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, opposite_face_lut_tex, *d_opposite_face_lut, channel_desc));
    // setup for d_face_to_check_dir_lut
    checkCudaErrors(cudaMalloc(d_face_to_check_dir_lut,
                               sizeof(check_dir_type) * VOXEL_NUM_FACES));
    checkCudaErrors(cudaMemcpy(*d_face_to_check_dir_lut, face_to_check_dir_lut,
                               sizeof(check_dir_type) * VOXEL_NUM_FACES,
                               cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, face_to_check_dir_lut_tex,
                                    *d_face_to_check_dir_lut, channel_desc));
    // setup for d_edge_belonged_voxel_lut
    checkCudaErrors(cudaMalloc(d_edge_belonged_voxel_lut, sizeof(uint8_t) * VOXEL_NUM_EDGES));
    checkCudaErrors(cudaMemcpy(*d_edge_belonged_voxel_lut, edge_belonged_voxel_lut,
                               sizeof(uint8_t) * VOXEL_NUM_EDGES, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, edge_belonged_voxel_lut_tex,
                                    *d_edge_belonged_voxel_lut, channel_desc));
    // setup for d_circular_edge_lut (3x4 host array flattened to 12 entries)
    checkCudaErrors(cudaMalloc(d_circular_edge_lut, sizeof(voxel_edge_index_type) * 12));
    checkCudaErrors(cudaMemcpy(*d_circular_edge_lut, (voxel_edge_index_type*)(*circular_edge_lut),
                               sizeof(voxel_edge_index_type) * 12, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, circular_edge_lut_tex, *d_circular_edge_lut, channel_desc));
    // setup for d_voxel_local_edges
    checkCudaErrors(cudaMalloc(d_voxel_local_edges,
                               sizeof(voxel_edge_index_type) * VOXEL_NUM_LOCAL_EDGES));
    checkCudaErrors(cudaMemcpy(*d_voxel_local_edges, voxel_local_edges,
                               sizeof(voxel_edge_index_type) * VOXEL_NUM_LOCAL_EDGES,
                               cudaMemcpyHostToDevice));
    checkCudaErrors(cudaBindTexture(0, voxel_local_edges_tex, *d_voxel_local_edges,
                                    channel_desc));
}

// Convenience overload: frees every device LUT tracked by the singleton.
void cleanup_device_luts() {
    LutPtrsCollection* luts = LutPtrsCollection::instance();
    cleanup_device_luts(luts->d_config_edge_lut1, luts->d_config_edge_lut2,
                        luts->d_num_vertex_lut1, luts->d_num_vertex_lut2,
                        luts->d_config_2B_3B_lut, luts->d_config_2B_3B_ambiguous_face,
                        luts->d_opposite_face_lut, luts->d_face_to_check_dir_lut,
                        luts->d_edge_belonged_voxel_lut, luts->d_circular_edge_lut,
                        luts->d_voxel_local_edges);
}

// Releases the device buffers allocated by setup_device_luts().
void cleanup_device_luts(iso_vertex_m_type* d_config_edge_lut1,
                         iso_vertex_m_type* d_config_edge_lut2,
                         uint8_t* d_num_vertex_lut1,
                         uint8_t* d_num_vertex_lut2,
                         voxel_config_type* d_config_2B_3B_lut,
                         voxel_face_index_type* d_config_2B_3B_ambiguous_face,
                         voxel_face_index_type* d_opposite_face_lut,
                         check_dir_type* d_face_to_check_dir_lut,
                         uint8_t* d_edge_belonged_voxel_lut,
                         voxel_edge_index_type* d_circular_edge_lut,
                         voxel_edge_index_type* d_voxel_local_edges) {
    checkCudaErrors(cudaFree(d_config_edge_lut1));
    checkCudaErrors(cudaFree(d_config_edge_lut2));
    checkCudaErrors(cudaFree(d_num_vertex_lut1));
    checkCudaErrors(cudaFree(d_num_vertex_lut2));
    checkCudaErrors(cudaFree(d_config_2B_3B_lut));
    checkCudaErrors(cudaFree(d_config_2B_3B_ambiguous_face));
    checkCudaErrors(cudaFree(d_opposite_face_lut));
    checkCudaErrors(cudaFree(d_face_to_check_dir_lut));
    checkCudaErrors(cudaFree(d_edge_belonged_voxel_lut));
    checkCudaErrors(cudaFree(d_circular_edge_lut));
    checkCudaErrors(cudaFree(d_voxel_local_edges));
}

// Stores the minimum required information for each voxel
class _VoxelInfo {
    typedef uint8_t info_type;

    // Bit positions inside m_info (see the field comment near the bottom).
    static const uint8_t EDGE_6_SHIFT = 0;
    static const uint8_t EDGE_9_SHIFT = 1;
    static const uint8_t EDGE_10_SHIFT = 2;
    static const uint8_t USE_LUT2_SHIFT = 7;

public:
    __host__ __device__ _VoxelInfo() = default;

    __host__ __device__ _VoxelInfo(voxel_index1D_type index)
        : m_index1D(index), m_info(0) { }

    // Records whether 'edge' (must be 6, 9 or 10) is bipolar.
    __host__ __device__ void encode_edge_is_bipolar(voxel_edge_index_type edge, bool is_bipolar) {
        uint8_t shift = get_edge_shift(edge);
        info_write_bit(shift, is_bipolar);
    }

    // Records bipolarity for 'edge' and, when bipolar, whether the edge is
    // polarized along the positive axis (CCW). The CCW bit sits 3 positions
    // above the bipolar bit of the same edge.
    __host__ __device__ void encode_edge_bipolar_info(voxel_edge_index_type edge,
                                                      bool is_bipolar, bool use_ccw) {
        uint8_t shift = get_edge_shift(edge);
        info_write_bit(shift, is_bipolar);
        if (is_bipolar) {
            shift += 3;
            info_write_bit(shift, use_ccw);
        }
    }

    // True when the stored bipolar bit for 'edge' is set.
    __host__ __device__ bool is_edge_bipolar(voxel_edge_index_type edge) const {
        //if (edge != 6 && edge != 9 && edge != 10) {
        //    printf("is_edge_bipolar: edge: %d\n", edge);
        //}
        uint8_t shift = get_edge_shift(edge);
        return (bool)info_read_bit(shift);
    }

    // An edge that is 'CCW' means the polarization direction of the edge aligns with the positive axis.
    // [precondition] 'edge' must be bipolar
    __host__ __device__ bool is_edge_ccw(voxel_edge_index_type edge) const {
        assert(is_edge_bipolar(edge));
        uint8_t shift = get_edge_shift(edge) + 3;
        return (bool)info_read_bit(shift);
        // return true;
    }

    __host__ __device__ inline void encode_use_lut2(bool use_lut2) {
        info_write_bit(USE_LUT2_SHIFT, use_lut2);
    }

    __host__ __device__ inline bool use_lut2() const {
        return (bool)info_read_bit(USE_LUT2_SHIFT);
    }

    // Plain accessors.
    __host__ __device__ voxel_index1D_type index1D() const { return m_index1D; }

    __host__ __device__ voxel_config_type config() const { return m_config; }

    __host__ __device__ void set_config(voxel_config_type c) { m_config = c; }

    __host__ __device__ inline vertex_index_type vertex_begin() const { return m_vertex_begin; }

    __host__ __device__ void set_vertex_begin(vertex_index_type begin) { m_vertex_begin = begin; }

    __host__ __device__ uint8_t num_vertices() const { return m_num_vertices; }

    __host__ __device__ void set_num_vertices(uint8_t num) { m_num_vertices = num; }

    __host__ __device__ uint8_t info() const { return m_info; }

    // Number of bipolar-edge intersection vertices owned by this voxel
    // (one per bipolar edge among edges 6, 9 and 10).
    __host__ __device__ uint8_t num_edge_vertices() const {
        uint8_t num = 0;
        num += info_read_bit(EDGE_6_SHIFT);
        num += info_read_bit(EDGE_9_SHIFT);
        num += info_read_bit(EDGE_10_SHIFT);
        return num;
    }

    // Number of DMC iso-vertices: total vertices minus edge-intersection ones.
    __host__ __device__ inline uint8_t num_iso_vertices() const {
        // return use_lut2() ? num_vertex_lut2[config] : num_vertex_lut1[config];
        return m_num_vertices - num_edge_vertices();
    }

    // Iso-vertices are laid out first in the vertex array, starting at vertex_begin().
    __host__ __device__ inline vertex_index_type iso_vertex_begin() const {
        return vertex_begin();
    }

    // Global vertex index of the iso-vertex with voxel-local id 'iso_vertex_m'.
    __host__ __device__ inline vertex_index_type iso_vertex_index(iso_vertex_m_type iso_vertex_m) const {
        assert(iso_vertex_m < num_iso_vertices());
        return iso_vertex_begin() + iso_vertex_m;
    }

    // Edge-intersection vertices follow immediately after the iso-vertices.
    __host__ __device__ inline vertex_index_type edge_vertex_begin() const {
        return m_vertex_begin + num_iso_vertices();
    }

    // Global index of the intersection vertex on bipolar 'edge'; the offset is
    // the count of bipolar edges stored at lower bit positions.
    __host__ __device__ vertex_index_type edge_vertex_index(voxel_edge_index_type edge) const {
        assert(is_edge_bipolar(edge));
        uint8_t offset = 0;
        for (uint8_t i = 0; i < get_edge_shift(edge); ++i) {
            offset += info_read_bit(i);
        }
        return edge_vertex_begin() + offset;
    }

    // Maps 'edge' to its voxel-local iso-vertex id via the LUT chosen by use_lut2().
    __device__ iso_vertex_m_type iso_vertex_m_by_edge(voxel_edge_index_type edge) const {
        if (use_lut2()) {
            // return config_edge_lut2[m_config][edge];
            return tex1Dfetch(config_edge_lut2_tex, m_config * VOXEL_NUM_EDGES + edge);
        } else {
            // return config_edge_lut1[m_config][edge];
            return tex1Dfetch(config_edge_lut1_tex, m_config * VOXEL_NUM_EDGES + edge);
        }
    }

private:
    // Only edges 6, 9 and 10 are owned by a voxel; any other edge is a caller bug.
    __host__ __device__ uint8_t get_edge_shift(voxel_edge_index_type edge) const {
        uint8_t shift;
        switch (edge) {
        case 6: shift = EDGE_6_SHIFT; break;
        case 9: shift = EDGE_9_SHIFT; break;
        case 10: shift = EDGE_10_SHIFT; break;
        default: assert(false); break;
        }
        return shift;
    }

    // Sets or clears the bit at 'shift' in m_info.
    __host__ __device__ void info_write_bit(uint8_t shift, bool flag) {
        info_type mask;
        if (flag) {
            mask = 0x01 << shift;
            m_info |= mask;
        } else {
            mask = ~(0x01 << shift);
            m_info &= mask;
        }
    }

    __host__ __device__ inline info_type info_read_bit(uint8_t shift) const {
        info_type shifted_info = m_info >> shift;
        return shifted_info & 0x01;
    }

    friend std::ostream& operator<<(std::ostream& os, const _VoxelInfo vx_info);

    // Its index_1D
    voxel_index1D_type m_index1D = INVALID_INDEX_1D;
    // The beginning index of the vertices (both DMC iso_vertex and iso-surface edge
    // intersection point).
    vertex_index_type m_vertex_begin = INVALID_UINT32;
    // The voxel config mask, each bit corresponds to one unique vertex corner point.
    // LSB (bit 0) represents corner pt 0, MSB (bit 7) represents corner pt 7
    voxel_config_type m_config = 0x00;
    // Compact bit vector:
    // bit 7: should use LUT2?
    // bit 5: is edge 10 using ccw?
    // bit 4: is edge 9 using ccw?
    // bit 3: is edge 6 using ccw?
    // bit 2: is edge 10 bipolar?
    // bit 1: is edge 9 bipolar?
    // bit 0: is edge 6 bipolar?
    // other bits: not used
    info_type m_info = 0x00;
    // Since this class will be enforced aligned, we can use another 8 bit to store the
    // number of vertices, although we can fully retrieve this information under the help
    // of both 'info' and other 'LUT's. 8_bit is quite enough because each voxel will have
    // a maximum of 4 + 3 = 7 vertices. (4 for DMC iso-vertices, 3 for bipolar edge pts)
    uint8_t m_num_vertices = 0;
    uint8_t m_pad = 0xff;
};

// Debug stream output (host only); prints config and info fields in hex.
std::ostream& operator<<(std::ostream& os, const _VoxelInfo vx_info) {
    os << "index1D: " << vx_info.index1D()
       << " config: " << std::hex << (unsigned)vx_info.config() << std::dec
       << " num_vertices: " << (unsigned)vx_info.num_vertices()
       << " vertex_begin: " << vx_info.vertex_begin()
       << " info: " << std::hex << (unsigned)vx_info.m_info << std::dec;
    return os;
}

// Calculate a voxel's config mask: bit i is set iff corner i's scalar value
// lies strictly below the iso-value.
__device__ voxel_config_type voxel_config_mask(const float* d_voxel_vals, float iso_value) {
    voxel_config_type mask = 0;
    for (uint8_t i = 0; i < 8; ++i) {
        mask |= (d_voxel_vals[i] < iso_value) << i;
    }
    return mask;
}

// True when any component of 'index3D' lies outside 'grid_size'. Components are
// unsigned, so an underflowed ("negative") index is also caught by the >= test.
__device__ bool is_out_of_grid_bound(const uint3& index3D, const uint3& grid_size) {
    return (index3D.x >= grid_size.x) || (index3D.y >= grid_size.y) ||
           (index3D.z >= grid_size.z);
}

// Scan through and flag out the active voxels according to its voxel config.
// One thread per voxel: gathers the 8 corner scalar samples, computes the voxel
// config mask, and writes a nonzero flag for "active" voxels (config strictly
// between 0 and MAX_VOXEL_CONFIG_MASK, i.e. the iso-surface crosses the voxel).
// Expects a 3D grid/block layout covering num_voxels_dim; the scalar grid holds
// one sample per corner, i.e. (num_voxels_dim + 1) samples along each axis.
__global__ void flag_active_voxels_kern(flag_type* d_voxel_flags, const float* d_scalar_grid,
                                        const uint3 num_voxels_dim, const float iso_value) {
    uint3 index3D;
    // __mul24: 24-bit multiply; valid while these products stay below 2^24.
    index3D.x = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
    index3D.y = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
    index3D.z = __mul24(blockDim.z, blockIdx.z) + threadIdx.z;
    if (is_out_of_grid_bound(index3D, num_voxels_dim)) return;

    // Corner values in corner order 0..7; the corner lattice pitch is
    // (num_voxels_dim.x + 1) x (num_voxels_dim.y + 1).
    float voxel_vals[8] = {
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y, index3D.z,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y, index3D.z + 1,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z + 1,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z + 1,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z + 1,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
    };

    voxel_config_type voxel_config = voxel_config_mask(voxel_vals, iso_value);
    unsigned index1D = index3D_to_1D(index3D, num_voxels_dim);
    // Logical &&: flag is 1 iff config != 0 AND config < MAX_VOXEL_CONFIG_MASK
    // (all-inside / all-outside voxels produce no surface).
    d_voxel_flags[index1D] = (voxel_config && voxel_config < MAX_VOXEL_CONFIG_MASK);
    // printf("i: %d, j: %d, k: %d, index1D: %d, config: %x, flag: %d\n",
    //        index3D.x, index3D.y, index3D.z, index1D, voxel_config, d_voxel_flags[index1D]);
}

// Host wrapper: launches flag_active_voxels_kern, then synchronizes so launch
// and execution errors are surfaced immediately.
void launch_flag_active_voxels(flag_type* d_voxel_flags, const float* d_scalar_grid,
                               const uint3 num_voxels_dim, const float iso_value,
                               const dim3 blocks_dim3, const dim3 threads_dim3) {
    flag_active_voxels_kern<<<blocks_dim3, threads_dim3>>>(d_voxel_flags, d_scalar_grid,
                                                           num_voxels_dim, iso_value);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}

// Device-side sum of d_arr (used to count set flags).
size_t launch_thrust_count(const unsigned* d_arr, size_t size) {
    return thrust::reduce(thrust::device, d_arr, d_arr + size);
}

// Exclusive prefix sum of d_data into d_scan (device execution policy).
void launch_thrust_scan(unsigned* d_scan, const unsigned* d_data, size_t size) {
    thrust::exclusive_scan(thrust::device, d_data, d_data + size, d_scan);
}

// Exclusive prefix sum over op(d_data[i]) without materializing the transformed input.
template <typename OutT, typename InT, typename UnaryOp>
void launch_thrust_transform_scan(OutT* d_scan, const InT* d_data, size_t size,
                                  const UnaryOp& op) {
    auto begin = thrust::make_transform_iterator(d_data, op);
    auto end = thrust::make_transform_iterator(d_data + size, op);
    thrust::exclusive_scan(thrust::device, begin, end, d_scan);
}

// Calculate the position of the isosurface's vertex: linear interpolation of
// p0/p1 at the point where the scalar field crosses iso_value.
__device__ float3 lerp_float3(const float3& p0, const float3& p1, const float v0,
                              const float v1, const float iso_value) {
    float interp = (iso_value - v0) / (v1 - v0);
    float one_minus_interp = 1.0f - interp;
    float3 iso_vertex = p0 * one_minus_interp + p1 * interp;
    return iso_vertex;
}

// Check if an edge is bipolar given its two endpoints' value. Equal values are
// never bipolar; otherwise the endpoints must not lie on the same side of iso_value.
// NOTE(review): an endpoint exactly equal to iso_value counts as bipolar here.
__device__ bool is_edge_bipolar(float val0, float val1, float iso_value) {
    if (val0 == val1) return false;
    else if (val0 > val1) return is_edge_bipolar(val1, val0, iso_value);  // normalize order
    return !((val0 < iso_value && val1 < iso_value) || (val0 > iso_value && val1 > iso_value));
}

// Return an edge index (0..11) given its two corner point indices.
// NOTE(review): control falls off the end for corner pairs that do not form an
// edge (undefined behavior); callers must only pass valid pairs -- the guarding
// assert(false) has been commented out.
__device__ voxel_edge_index_type pt_pair_edge_lut(voxel_pt_index_type p0,
                                                  voxel_pt_index_type p1) {
    // assert(p0 != p1);
    if (p0 > p1) return pt_pair_edge_lut(p1, p0);  // normalize order
    if (p0 == 0 && p1 == 1) return 0;
    else if (p0 == 1 && p1 == 2) return 1;
    else if (p0 == 2 && p1 == 3) return 2;
    else if (p0 == 0 && p1 == 3) return 3;
    else if (p0 == 0 && p1 == 4) return 4;
    else if (p0 == 1 && p1 == 5) return 5;
    else if (p0 == 2 && p1 == 6) return 6;
    else if (p0 == 3 && p1 == 7) return 7;
    else if (p0 == 4 && p1 == 5) return 8;
    else if (p0 == 5 && p1 == 6) return 9;
    else if (p0 == 6 && p1 == 7) return 10;
    else if (p0 == 4 && p1 == 7) return 11;
    // assert(false);
}

// Compact to get the active voxels, for each compacted voxel, store its index_1D.
// [invariant] for 0 <= i < d_compact_voxel_info.size(),
//     d_full_voxel_index_map[d_compact_voxel_info[i].index1D] == i
__global__ void compact_voxel_flags_kern(_VoxelInfo* d_compact_voxel_info,
                                         voxel_index1D_type* d_full_voxel_index_map,
                                         const uint3 num_voxels_dim,
                                         const flag_type* d_flags,
                                         const unsigned* d_flags_scan,
                                         const unsigned flags_size) {
    // index = (gridDim.x * blockDim.x) * blockIdx.y + blockIdx.x * blockDim.x + threadIdx.x
    // unsigned index1D = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    // index1D = __mul24(index1D, blockDim.x) + threadIdx.x;
    // if (index1D >= flags_size) return;
    uint3 index3D;
    index3D.x = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
    index3D.y = __mul24(blockDim.y, blockIdx.y) + threadIdx.y;
    index3D.z = __mul24(blockDim.z, blockIdx.z) + threadIdx.z;
    if (is_out_of_grid_bound(index3D, num_voxels_dim)) return;

    unsigned index1D = index3D_to_1D(index3D, num_voxels_dim);
    // The exclusive scan of the flags gives each active voxel its slot in the
    // compacted array.
    unsigned compact_index = d_flags_scan[index1D];
    // printf("index1D: %d, scan: %d, flag: %d\n", index1D, compact_index, d_flags[index1D]);
    if (d_flags[index1D]) {
        d_full_voxel_index_map[index1D] = compact_index; // d_flags_scan[index1D];
        d_compact_voxel_info[compact_index] = _VoxelInfo(index1D);
    }
}

// Host wrapper for compact_voxel_flags_kern (synchronous, checks errors).
void launch_compact_voxel_flags(_VoxelInfo* d_compact_voxel_info,
                                voxel_index1D_type* d_full_voxel_index_map,
                                const uint3 num_voxels_dim, const flag_type* d_flags,
                                const unsigned* d_flags_scan, const unsigned flags_size,
                                const dim3 blocks_dim3, const dim3 threads_dim3) {
    compact_voxel_flags_kern<<<blocks_dim3, threads_dim3>>>(d_compact_voxel_info,
                                                            d_full_voxel_index_map,
                                                            num_voxels_dim, d_flags,
                                                            d_flags_scan, flags_size);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}

// Initialize the voxel info. During this stage we only store the voxel config and
// the edges this voxel manages (edge 6, 9, 10) are bipolar.
// The possible situation where voxels with 2B config and 3B config are adjacent
// are not resolved at this stage.
__global__ void init_voxels_info_kern(_VoxelInfo* d_compact_voxel_info,
                                      const unsigned compact_size,
                                      const float* d_scalar_grid,
                                      const uint3 num_voxels_dim,
                                      const float iso_value) {
    // 1D indexing over the compacted active-voxel array (2D grid of 1D blocks).
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(vx_info.index1D(), num_voxels_dim);

    // Corner scalar values in the same 0..7 corner order used by
    // flag_active_voxels_kern; corner lattice pitch is (dim + 1) per axis.
    float voxel_vals[8] = {
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y, index3D.z,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y, index3D.z + 1,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z + 1,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z + 1,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z + 1,
                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
    };

    vx_info.set_config(voxel_config_mask(voxel_vals, iso_value));

    // Encodes bipolarity (and, for bipolar edges, the polarization direction)
    // for one voxel-owned edge, identified by its two corner points.
    auto encode_voxel_edge_info = [=, &vx_info](voxel_pt_index_type p0,
                                                voxel_pt_index_type p1) {
        voxel_edge_index_type edge_index = pt_pair_edge_lut(p0, p1);
        bool is_bipolar = is_edge_bipolar(voxel_vals[p0], voxel_vals[p1], iso_value);
        if (is_bipolar) {
            // CCW (aligned with the positive axis) when the first endpoint sits
            // on or below the iso-value.
            bool use_ccw = voxel_vals[p0] <= iso_value;
            vx_info.encode_edge_bipolar_info(edge_index, is_bipolar, use_ccw);
        } else {
            vx_info.encode_edge_is_bipolar(edge_index, is_bipolar);
        }
    };
    encode_voxel_edge_info(2, 6); // edge 6
    encode_voxel_edge_info(5, 6); // edge 9
    encode_voxel_edge_info(7, 6); // edge 10

    d_compact_voxel_info[compact_index] = vx_info;
    // printf("compact index: %d, index1D: %d, config: %x, info: %x\n",
    //        compact_index, vx_info.index1D(), (unsigned)vx_info.config(), (unsigned)vx_info.info());
}

// Host wrapper for init_voxels_info_kern (synchronous, checks errors).
void launch_init_voxels_info(_VoxelInfo* d_compact_voxel_info, const unsigned compact_size,
                             const float* d_scalar_grid, const uint3 num_voxels_dim,
                             const float iso_value, const dim3 blocks_dim3,
                             const dim3 threads_dim3) {
    init_voxels_info_kern<<<blocks_dim3, threads_dim3>>>(d_compact_voxel_info, compact_size,
                                                         d_scalar_grid, num_voxels_dim,
                                                         iso_value);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}

// Check if the given voxel config belongs to 2B or 3B ambiguous config category.
// On a hit, 'index' receives the position inside the ambiguous-config LUT.
// Linear search over the NUM_AMBIGUOUS_CONFIGS entries via the texture cache.
__device__ bool is_ambiguous_config(voxel_config_type config, uint8_t& index) {
    for (unsigned i = 0; i < NUM_AMBIGUOUS_CONFIGS; ++i) {
        // if (config_2B_3B_lut[i] == config)
        // if (d_config_2B_3B_lut[i] == config)
        if (tex1Dfetch(config_2B_3B_lut_tex, i) == config) {
            index = i;
            return true;
        }
    }
    return false;
}

// Check if after we advance the 'index3D' according to 'dir', the new result will
// exceed the boundary or not. Have to use this function because we are using unsigned
// int instead of int.
__device__ bool will_exceed_boundary(uint3 index3D, uint3 dims, const check_dir_type dir) {
    switch (dir) {
    case POS_X_DIR: // CHECK_DIR::PX:
        return index3D.x + 1 >= dims.x;
    case NEG_X_DIR: // CHECK_DIR::NX:
        return index3D.x == 0;
    case POS_Y_DIR: // CHECK_DIR::PY:
        return index3D.y + 1 >= dims.y;
    case NEG_Y_DIR: // CHECK_DIR::NY:
        return index3D.y == 0;
    case POS_Z_DIR: // CHECK_DIR::PZ:
        return index3D.z + 1 >= dims.z;
    case NEG_Z_DIR: // CHECK_DIR::NZ:
        return index3D.z == 0;
    default:
        return false;
    }
}

// Execute the 'dir' on 'index3D' to get the new result.
// It is the user's responsibility
// to make sure that the result won't exceed the boundary.
// NOTE(review): no default case -- control falls off the end for an invalid
// 'dir' (undefined behavior); callers must pass one of the six direction codes.
__device__ uint3 get_index3D_by_dir(uint3 index3D, const check_dir_type dir) {
    switch (dir) {
    case POS_X_DIR: // CHECK_DIR::PX:
        return make_uint3(index3D.x + 1, index3D.y, index3D.z);
    case NEG_X_DIR: // CHECK_DIR::NX:
        return make_uint3(index3D.x - 1, index3D.y, index3D.z);
    case POS_Y_DIR: // CHECK_DIR::PY:
        return make_uint3(index3D.x, index3D.y + 1, index3D.z);
    case NEG_Y_DIR: // CHECK_DIR::NY:
        return make_uint3(index3D.x, index3D.y - 1, index3D.z);
    case POS_Z_DIR: // CHECK_DIR::PZ:
        return make_uint3(index3D.x, index3D.y, index3D.z + 1);
    case NEG_Z_DIR: // CHECK_DIR::NZ:
        return make_uint3(index3D.x, index3D.y, index3D.z - 1);
    }
}

// Check if the active voxel indicated by 'cur_compact_index' has an adjacent voxel which has
// an ambiguous config that will result in non-manifold situation.
// [precondition] d_compact_voxel_info[cur_compact_index].config == config_2B_3B_lut[cur_config_index]
// On success, 'adjacent_compact_index' receives the neighbor's compacted index.
__device__ bool is_adjacent_ambiguous_config(voxel_index1D_type& adjacent_compact_index,
                                             voxel_index1D_type cur_index1D,
                                             uint8_t cur_config_index,
                                             const _VoxelInfo* d_compact_voxel_info,
                                             const voxel_index1D_type* d_full_voxel_index_map,
                                             const uint3& num_voxels_dim) {
    // assert(compact_voxel_info[cur_compact_index].config() == config_2B_3B_lut[cur_config_index]);
    // Get the 3D coordinate of the current active voxel
    uint3 cur_index3D = index1D_to_3D(cur_index1D, num_voxels_dim);
    // uint3 cur_index3D = index1D_to_3D(compact_voxel_info[cur_compact_index].index1D(), num_voxels_dim);
    // Get the checking direction, or offset, according to 'cur_ambiguous_face'
    // voxel_face_index_type cur_ambiguous_face = config_2B_3B_ambiguous_face[cur_config_index];
    voxel_face_index_type cur_ambiguous_face =
        tex1Dfetch(config_2B_3B_ambiguous_face_tex, cur_config_index);
    // CHECK_DIR dir = face_to_check_dir_lut[cur_ambiguous_face];
    check_dir_type dir = tex1Dfetch(face_to_check_dir_lut_tex, cur_ambiguous_face);
    // A neighbor outside the grid can never be ambiguous.
    if (will_exceed_boundary(cur_index3D, num_voxels_dim, dir)) {
        return false;
    }
    // Compute the index of the voxel to be checked in 'd_compact_voxel_info'
    uint3 index3D_to_check = get_index3D_by_dir(cur_index3D, dir);
    voxel_index1D_type index1D_to_check;
    index3D_to_1D(index3D_to_check, num_voxels_dim, index1D_to_check);
    voxel_index1D_type adjc_compact_index_to_check = d_full_voxel_index_map[index1D_to_check];
    // assert(adjc_compact_index_to_check != INVALID_INDEX_1D);
    uint8_t adj_config_index;
    if (is_ambiguous_config(d_compact_voxel_info[adjc_compact_index_to_check].config(),
                            adj_config_index)) {
        // voxel_face_index_type adj_ambiguous_face = config_2B_3B_ambiguous_face[adj_config_index];
        // assert(opposite_face_lut[cur_ambiguous_face] == adj_ambiguous_face);
        adjacent_compact_index = adjc_compact_index_to_check;
        return true;
    }
    return false;
}

// Correct some of the voxels when it and its adjacent voxel are having ambiguous configs that will
// result in non-manifold. Returns the actual number of vertices, including both iso-vertex and
// intersection vertex between voxel bipolar edge and iso-surface.
__global__ void correct_voxels_info_kern(_VoxelInfo* d_compact_voxel_info, unsigned compact_size,
                                         const voxel_index1D_type* d_full_voxel_index_map,
                                         const uint3 num_voxels_dim)
{
    // Flatten a 2D grid of 1D blocks into a single compacted-voxel index.
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint8_t ambiguous_config_index = INVALID_UINT8;
    // if ((vx_info.use_lut2()) || (!is_ambiguous_config(vx_info.config(), ambiguous_config_index)))
    if (!is_ambiguous_config(vx_info.config(), ambiguous_config_index))
    {
        return;
    }
    voxel_index1D_type adjacent_compact_index;
    // If the neighbor across the ambiguous face is also ambiguous, switch this voxel
    // to the alternative vertex LUT to avoid a non-manifold result.
    if (is_adjacent_ambiguous_config(adjacent_compact_index, vx_info.index1D(), ambiguous_config_index,
                                     d_compact_voxel_info, d_full_voxel_index_map, num_voxels_dim))
    {
        printf("compact_index %d uses lut2!\n", compact_index);
        d_compact_voxel_info[compact_index].encode_use_lut2(true);
        // d_compact_voxel_info[adjacent_compact_index].encode_use_lut2(true);
    }
}

// Host wrapper for correct_voxels_info_kern; blocks until completion.
void launch_correct_voxels_info(_VoxelInfo* d_compact_voxel_info, unsigned num_compact_voxels,
                                const voxel_index1D_type* d_full_voxel_index_map,
                                const uint3 num_voxels_dim,
                                const dim3 blocks_dim3, const dim3 threads_dim3)
{
    correct_voxels_info_kern<<<blocks_dim3, threads_dim3>>>(d_compact_voxel_info, num_compact_voxels,
                                                            d_full_voxel_index_map, num_voxels_dim);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}

// Per voxel: total vertex count = iso-vertices (from LUT1 or LUT2, depending on the
// ambiguity correction above) + edge/iso-surface intersection vertices.
__global__ void calc_num_vertices_per_voxel_kern(_VoxelInfo* d_compact_voxel_info, unsigned compact_size)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint8_t num_voxel_vertices = 0;
    // num iso-vertices
    if (vx_info.use_lut2())
    {
        // num_voxel_vertices += num_vertex_lut2[vx_info.config()];
        num_voxel_vertices += tex1Dfetch(num_vertex_lut2_tex, vx_info.config());
    }
    else
    {
        // num_voxel_vertices += num_vertex_lut1[vx_info.config()];
        num_voxel_vertices += tex1Dfetch(num_vertex_lut1_tex, vx_info.config());
    }
    // num edge iso-surface intersection vertices
    num_voxel_vertices += vx_info.num_edge_vertices();
    d_compact_voxel_info[compact_index].set_num_vertices(num_voxel_vertices);
}

// Host wrapper for calc_num_vertices_per_voxel_kern; blocks until completion.
void launch_calc_num_vertices_per_voxel(_VoxelInfo* d_compact_voxel_info, unsigned num_compact_voxels,
                                        const dim3 blocks_dim3, const dim3 threads_dim3)
{
    calc_num_vertices_per_voxel_kern<<<blocks_dim3, threads_dim3>>>(d_compact_voxel_info, num_compact_voxels);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}

// Store each voxel's exclusive-scan vertex offset so vertex indices are globally unique.
__global__ void set_vertices_begin_kern(_VoxelInfo* d_compact_voxel_info,
                                        const vertex_index_type* d_vertices_begin_scan,
                                        unsigned compact_size)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;
    d_compact_voxel_info[compact_index].set_vertex_begin(d_vertices_begin_scan[compact_index]);
}

// Host wrapper for set_vertices_begin_kern; blocks until completion.
void launch_set_vertices_begin(_VoxelInfo* d_compact_voxel_info,
                               const vertex_index_type* d_vertices_begin_scan,
                               unsigned compact_size, dim3 blocks_dim3, dim3 threads_dim3)
{
    set_vertices_begin_kern<<<blocks_dim3, threads_dim3>>>(d_compact_voxel_info,
                                                           d_vertices_begin_scan, compact_size);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}

// Decode one packed LUT entry: low nibble is the owning voxel's local edge index;
// the top three bits (consumed one at a time via shifts) encode the x/y/z offsets
// (0 -> 0, set bit -> -1) from this voxel to the voxel that owns the edge.
// A LOCAL_EDGE_ENTRY means the edge is owned by the current voxel; outputs untouched.
__device__ void decode_edge_belong_voxel_entry(uint8_t entry, int8_t& x_offset, int8_t& y_offset,
                                               int8_t& z_offset, uint8_t& belonged_edge_index)
{
    if (entry == LOCAL_EDGE_ENTRY) return;
    // extract the edge
    belonged_edge_index = 0x0f & entry;
    auto get_offset = [](uint8_t first_bit)
    {
        switch (first_bit)
        {
        case 0x00: return (int8_t)0;
        case 0x80: return (int8_t)-1;
        default: assert(false); return (int8_t)0xff;
        }
    };
    uint8_t first_bit = entry & 0x80;
    x_offset = get_offset(first_bit);
    entry
    <<= 1;
    first_bit = entry & 0x80;
    y_offset = get_offset(first_bit);
    entry <<= 1;
    first_bit = entry & 0x80;
    z_offset = get_offset(first_bit);
}

// Sample the intersection vertices positions between voxel bipolar edges and iso-surface.
// Each voxel is only responsible for its local edges, namely 6, 9 and 10; all three
// share corner 6 of the voxel, whose scalar value is loaded once into 'val6'.
__global__ void sample_edge_intersection_vertices_kern(float3* d_vertices,
                                                       const _VoxelInfo* d_compact_voxel_info,
                                                       const unsigned compact_size,
                                                       const float* d_scalar_grid,
                                                       const uint3 num_voxels_dim,
                                                       const float3 xyz_min, const float3 xyz_range,
                                                       const float iso_value)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    uint3 index3D = index1D_to_3D(d_compact_voxel_info[compact_index].index1D(), num_voxels_dim);
    // Output cursor: this voxel's first edge-vertex slot in the global vertex array.
    vertex_index_type vx_edge_vertex_index = d_compact_voxel_info[compact_index].edge_vertex_begin();

    // World-space coordinates of the voxel's +1 corner. Note the scalar grid has one
    // more sample than voxels along each axis, hence the "+ 1" in index3D_to_1D below.
    float x1 = ijk_to_xyz(index3D.x + 1, num_voxels_dim.x, xyz_range.x, xyz_min.x);
    float y1 = ijk_to_xyz(index3D.y + 1, num_voxels_dim.y, xyz_range.y, xyz_min.y);
    float z1 = ijk_to_xyz(index3D.z + 1, num_voxels_dim.z, xyz_range.z, xyz_min.z);
    float val6 = d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z + 1,
                                             num_voxels_dim.x + 1, num_voxels_dim.y + 1)];

    float xyz_changed = ijk_to_xyz(index3D.z, num_voxels_dim.z, xyz_range.z, xyz_min.z);
    if (d_compact_voxel_info[compact_index].is_edge_bipolar(6))
    {
        // edge 6, pt 2 & 6
        d_vertices[vx_edge_vertex_index] =
            lerp_float3(make_float3(x1, y1, xyz_changed), make_float3(x1, y1, z1),
                        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y + 1, index3D.z,
                                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
                        val6, iso_value);
        ++vx_edge_vertex_index;
    }
    xyz_changed = ijk_to_xyz(index3D.y, num_voxels_dim.y, xyz_range.y, xyz_min.y);
    if (d_compact_voxel_info[compact_index].is_edge_bipolar(9))
    {
        // edge 9, pt 5 & 6
        d_vertices[vx_edge_vertex_index] =
            lerp_float3(make_float3(x1, xyz_changed, z1), make_float3(x1, y1, z1),
                        d_scalar_grid[index3D_to_1D(index3D.x + 1, index3D.y, index3D.z + 1,
                                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
                        val6, iso_value);
        ++vx_edge_vertex_index;
    }
    xyz_changed = ijk_to_xyz(index3D.x, num_voxels_dim.x, xyz_range.x, xyz_min.x);
    if (d_compact_voxel_info[compact_index].is_edge_bipolar(10))
    {
        // edge 10, pt 6 & 7
        d_vertices[vx_edge_vertex_index] =
            lerp_float3(make_float3(x1, y1, z1), make_float3(xyz_changed, y1, z1), val6,
                        d_scalar_grid[index3D_to_1D(index3D.x, index3D.y + 1, index3D.z + 1,
                                                    num_voxels_dim.x + 1, num_voxels_dim.y + 1)],
                        iso_value);
        ++vx_edge_vertex_index;
    }
}

// Calculate the iso vertices positions in each voxel by averaging the edge-intersection
// vertices incident to each iso-vertex. Requires blockDim.x * sizeof(_VoxelInfo) bytes
// of dynamic shared memory (one _VoxelInfo per thread).
__global__ void calc_iso_vertices_kern(float3* d_vertices, const _VoxelInfo* d_compact_voxel_info,
                                       const unsigned compact_size,
                                       const voxel_index1D_type* d_full_voxel_index_map,
                                       const uint3 num_voxels_dim)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    extern __shared__ _VoxelInfo sh_vx_info[];
    // Each thread only ever touches its own slot, so no block barrier is needed here.
    sh_vx_info[threadIdx.x] = d_compact_voxel_info[compact_index];
    // _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(sh_vx_info[threadIdx.x].index1D(), num_voxels_dim);
    // How many edge-intersection vertices contribute to each of (up to 4) iso-vertices.
    uint8_t iso_vertex_num_incident[4] = {0, 0, 0, 0};

    for (voxel_edge_index_type edge = 0; edge < VOXEL_NUM_EDGES; ++edge)
    {
        iso_vertex_m_type iso_vertex_m = sh_vx_info[threadIdx.x].iso_vertex_m_by_edge(edge);
        if (iso_vertex_m == NO_VERTEX)
        {
            continue;
        }
        // uint8_t entry = edge_belonged_voxel_lut[edge];
        uint8_t entry = tex1Dfetch(edge_belonged_voxel_lut_tex, edge);
        voxel_edge_index_type belonged_edge = 0xff;
        voxel_index1D_type belonged_index1D = INVALID_INDEX_1D;
        if (entry == LOCAL_EDGE_ENTRY)
        {
            // edge belongs to current voxel
            belonged_index1D = sh_vx_info[threadIdx.x].index1D();
            belonged_edge = edge;
        }
        else
        {
            int8_t x_offset = 0xff, y_offset = 0xff, z_offset = 0xff;
            decode_edge_belong_voxel_entry(entry, x_offset, y_offset, z_offset, belonged_edge);
            bool
            exceed_boundary = (x_offset < 0 && index3D.x == 0)
                           || (y_offset < 0 && index3D.y == 0)
                           || (z_offset < 0 && index3D.z == 0);
            if (exceed_boundary)
            {
                continue;
            }
            belonged_index1D = index3D_to_1D(index3D.x + x_offset, index3D.y + y_offset,
                                             index3D.z + z_offset, num_voxels_dim.x, num_voxels_dim.y);
        }
        // Get the 'belonged_voxel' which manages 'belonged_edge'
        vertex_index_type edge_intersect_vertex_index =
            d_compact_voxel_info[d_full_voxel_index_map[belonged_index1D]].edge_vertex_index(belonged_edge);
        vertex_index_type iso_vertex_index = sh_vx_info[threadIdx.x].iso_vertex_index(iso_vertex_m);
        if (iso_vertex_num_incident[iso_vertex_m] == 0)
        {
            // If this is the first time we see 'iso_vertex_m', we just assign it
            d_vertices[iso_vertex_index] = d_vertices[edge_intersect_vertex_index];
        }
        else
        {
            // Otherwise we accumulate into it
            d_vertices[iso_vertex_index] += d_vertices[edge_intersect_vertex_index];
        }
        ++iso_vertex_num_incident[iso_vertex_m];
    }
    // For each iso-vertex managed by 'vx_info', calculate its new position by averaging its
    // associated edges intersection vertex positions.
    for (iso_vertex_m_type iso_vertex_m = 0;
         iso_vertex_m < sh_vx_info[threadIdx.x].num_iso_vertices(); ++iso_vertex_m)
    {
        vertex_index_type iso_vertex_index = sh_vx_info[threadIdx.x].iso_vertex_index(iso_vertex_m);
        if (iso_vertex_num_incident[iso_vertex_m])
        {
            d_vertices[iso_vertex_index] /= (float)(iso_vertex_num_incident[iso_vertex_m]);
        }
    }
}

// Range object yielding, for one shared edge (6, 9 or 10), the four voxels around
// that edge in circular (cw or ccw) order. Designed for C++11 range-for in device code.
class CircularEdgeRange
{
public:
    class CircularEdgeIterator
    {
    public:
        typedef CircularEdgeIterator iterator_type;

        // Begin iterator: state 0, walking ccw or cw around the edge.
        __device__ CircularEdgeIterator(voxel_edge_index_type edge, bool ccw)
        : m_lut_index(get_lut_index_by_edge(edge)), m_cur_state(0), m_ccw(ccw) { }

        // For end iterator (state 4 == one past the four surrounding voxels).
        __device__ CircularEdgeIterator(voxel_edge_index_type edge)
        : m_lut_index(get_lut_index_by_edge(edge)), m_cur_state(4), m_ccw(true) { }

        // We are using CircularEdgeIterator itself, it does not represent any data underlying it.
        // However, for range object to work in c++11, we have to define dereference operator*().
        // Therefore we let it dereference to itself.
        __device__ const CircularEdgeIterator& operator*() const { return *this; }

        // We've been lazy here and only compare 'm_lut_index' and 'm_cur_state'.
        // It's not absolutely safe, but we don't expect the client should use this class at all!
        __device__ bool operator==(const iterator_type& other) const
        {
            return (m_lut_index == other.m_lut_index) && (m_cur_state == other.m_cur_state);
        }
        __device__ bool operator!=(const iterator_type& other) const
        {
            return !(this->operator==(other));
        }
        __device__ iterator_type& operator++()
        {
            if (m_cur_state < 4) ++m_cur_state;
            return (*this);
        }

        // Retrieve the information of the adjacent voxel that shares the edge, along with
        // the edge index in that voxel, in circular order
        __device__ void retrieve(uint3& circular_index3D, voxel_edge_index_type& circular_edge,
                                 const uint3& src_index3D) const
        {
            // ccw order: 0, 1, 2, 3
            // cw order: 0, 3, 2, 1
            // cw[i] = (3 - ccw[i] + 1) % 4 = (4 - ccw[i]) % 4
            if (m_ccw)
            {
                // circular_edge = circular_edge_lut[m_lut_index][ccw_order[m_cur_state]];
                circular_edge = tex1Dfetch(circular_edge_lut_tex, m_lut_index + m_cur_state);
            }
            else
            {
                // circular_edge = circular_edge_lut[m_lut_index][cw_order[m_cur_state]];
                circular_edge = tex1Dfetch(circular_edge_lut_tex, m_lut_index + ((4 - m_cur_state) % 4));
            }
            // reverse calculate the adjacent voxel that shares the edge
            // uint8_t entry = edge_belonged_voxel_lut[circular_edge];
            uint8_t entry = tex1Dfetch(edge_belonged_voxel_lut_tex, circular_edge);
            if (entry == LOCAL_EDGE_ENTRY)
            {
                circular_index3D = src_index3D;
            }
            else
            {
                int8_t x_offset, y_offset, z_offset;
                voxel_edge_index_type src_edge;
                decode_edge_belong_voxel_entry(entry, x_offset, y_offset, z_offset, src_edge);
                assert(get_lut_index_by_edge(src_edge) == m_lut_index);
                // The LUT stores the offset TO the owning voxel; negate to go FROM it.
                x_offset = -x_offset;
                y_offset = -y_offset;
                z_offset = -z_offset;
                circular_index3D = src_index3D;
                circular_index3D.x += x_offset;
                circular_index3D.y += y_offset;
                circular_index3D.z += z_offset;
            }
        }

    private:
        // Map local edge id to its row offset in the flattened 4-wide circular-edge LUT.
        // NOTE(review): falls off the end for any other edge — guarded only by assert(false).
        __device__ uint8_t get_lut_index_by_edge(voxel_edge_index_type edge) const
        {
            if (edge == 6) return 0;
            else if (edge == 9) return 4; // 1;
            else if (edge == 10) return 8; // 2;
            assert(false);
        }

        uint8_t m_lut_index;   // row offset into the circular-edge LUT
        uint8_t m_cur_state;   // 0..3 = surrounding voxels; 4 = end
        bool m_ccw;            // iteration direction
    };

    __device__ CircularEdgeRange(voxel_edge_index_type edge, bool ccw = true)
    : m_edge(edge), m_ccw(ccw) { }

    __device__ CircularEdgeIterator begin() const { return {m_edge, m_ccw}; }
    __device__ CircularEdgeIterator end() const { return {m_edge}; }

private:
    uint8_t m_edge;
    bool m_ccw;
};

// Check, when we want to retrieve all the four voxels sharing the same 'edge', if any of these voxels
// will actually exceed the boundary. Notice that all the circular edges are carefully designed so that
// the adjacent voxels will only increase their position along the positive axis direction.
__device__ bool circular_edge_exceed_boundary(voxel_edge_index_type edge, const uint3& index3D,
                                              const uint3& num_voxels_dim)
{
    switch (edge)
    {
    case 6:
        return (index3D.x + 1 >= num_voxels_dim.x) || (index3D.y + 1 >= num_voxels_dim.y);
    case 9:
        return (index3D.x + 1 >= num_voxels_dim.x) || (index3D.z + 1 >= num_voxels_dim.z);
    case 10:
        return (index3D.y + 1 >= num_voxels_dim.y) || (index3D.z + 1 >= num_voxels_dim.z);
    default:
        assert(false);
    }
}

// Project the four iso-vertices around 'edge' onto the plane perpendicular to that edge:
// edge 6 -> xy plane, edge 9 -> xz plane, edge 10 -> yz plane.
__device__ void project_vertices_by_shared_edge(float2* projected_vertex_pos, voxel_edge_index_type edge,
                                                const vertex_index_type* iso_vertex_indices,
                                                const float3* compact_vertices)
{
    if (edge == 6)
    {
        for (uint8_t i = 0; i < 4; ++i)
        {
            projected_vertex_pos[i] = xy(compact_vertices[iso_vertex_indices[i]]);
        }
    }
    else if (edge == 9)
    {
        for (uint8_t i = 0; i < 4; ++i)
        {
            projected_vertex_pos[i] = xz(compact_vertices[iso_vertex_indices[i]]);
        }
    }
    else if (edge == 10)
    {
        for (uint8_t i = 0; i < 4; ++i)
        {
            projected_vertex_pos[i] = yz(compact_vertices[iso_vertex_indices[i]]);
        }
    }
    else
    {
        assert(false);
    }
}

inline
__device__ int8_t calc_cross_z_sign(const float2& p_left, const float2& p_mid, const float2& p_right)
{
    // Sign of the z component of (p_right - p_mid) x (p_left - p_mid):
    // tells on which side p_left lies relative to the turn at p_mid.
    float dx1 = p_right.x - p_mid.x, dy1 = p_right.y - p_mid.y;
    float dx2 = p_left.x - p_mid.x, dy2 = p_left.y - p_mid.y;
    float cross_z = dx1 * dy2 - dx2 * dy1;
    return cross_z >= 0 ? 1 : -1;
}

// For the quadrilateral 'pts' (4 points), classify the turn at each corner and pack
// the results into 'pos_info'/'neg_info'.
__device__ void calc_quadrilateral_signs(const float2* pts, uint8_t& pos_info, uint8_t& neg_info)
{
    pos_info = 0x00;
    neg_info = 0x00;
    auto encode_sign_info = [&](uint8_t& info, uint8_t index)
    {
        // info:
        // bit 3-0, count of pos/neg signs
        // bit 7-4, index (of the last corner recorded with this sign)
        info &= 0x0f;
        info += 1;
        index = (index & 0x0f) << 4;
        info |= index;
    };
    auto calc_sign = [&](uint8_t index)
    {
        int8_t sign = calc_cross_z_sign(pts[(index + 4 - 1) % 4], pts[index], pts[(index + 1) % 4]);
        if (sign == 1)
        {
            encode_sign_info(pos_info, index);
        }
        else
        {
            encode_sign_info(neg_info, index);
        }
    };
    for (uint8_t i = 0; i < 4; ++i)
    {
        calc_sign(i);
    }
}

// The only case for this is when (pos_info & 0x0f) == (neg_info & 0x0f) == 2
// (a self-intersecting, "bow-tie" quadrilateral).
__device__ bool is_quadrilateral_complex(uint8_t pos_info, uint8_t neg_info)
{
    return (pos_info & 0x0f) == (neg_info & 0x0f);
}

// is_quadrilateral_convex function acts a bit weird. It tests if the four points
// form a convex quadrilateral. If they do, then 'unique_index' will not
// be changed. Otherwise if they form a concave quadrilateral, 'unique_index' stores
// the index of the point (in range [0, 3]) that causes the concavity.
__device__ bool is_quadrilateral_convex(uint8_t pos_info, uint8_t neg_info, uint8_t& unique_index)
{
    if (((pos_info & 0x0f) == 0) || ((neg_info & 0x0f) == 0))
    {
        return true;
    }
    else if ((pos_info & 0x0f) < (neg_info & 0x0f))
    {
        unique_index = (pos_info & 0xf0) >> 4;
    }
    else if ((neg_info & 0x0f) < (pos_info & 0x0f))
    {
        unique_index = (neg_info & 0xf0) >> 4;
    }
    else
    {
        // 2/2 split is the complex (self-intersecting) case; callers filter it out first.
        assert(false);
    }
    return false;
}

// Convenience overload: computes the corner signs itself.
__device__ bool is_quadrilateral_convex(const float2* pts, uint8_t& unique_index)
{
    uint8_t pos_info = 0x00, neg_info = 0x00;
    calc_quadrilateral_signs(pts, pos_info, neg_info);
    return is_quadrilateral_convex(pos_info, neg_info, unique_index);
}

// Angle (radians) at p_mid formed by p_left and p_right.
__device__ float calc_radian(const float2& p_left, const float2& p_mid, const float2& p_right)
{
    float2 v_ml = p_left - p_mid;
    normalize(v_ml);
    float2 v_mr = p_right - p_mid;
    normalize(v_mr);
    float theta = acosf(v_ml.x * v_mr.x + v_ml.y * v_mr.y);
    return theta;
}

// Choose the diagonal (split0, split1) along which to split the quadrilateral into
// two triangles. Concave: split at the concave corner. Convex: split at the corner
// with the largest interior angle.
__device__ void find_quadrilateral_split(const float2* pts, uint8_t pos_info, uint8_t neg_info,
                                         uint8_t& split0, uint8_t& split1)
{
    uint8_t split_index;
    if (is_quadrilateral_convex(pos_info, neg_info, split_index))
    {
        // If it is convex, then we split the quadrilateral with the diagonal that connects the
        // point that forms the largest angle.
        float radians[4] =
        {
            calc_radian(pts[3], pts[0], pts[1]),
            calc_radian(pts[0], pts[1], pts[2]),
            calc_radian(pts[1], pts[2], pts[3]),
            calc_radian(pts[2], pts[3], pts[0])
        };
        uint8_t max_radian_index = 0;
        for (uint8_t i = 1; i < 4; ++i)
        {
            if (radians[i] > radians[max_radian_index]) max_radian_index = i;
        }
        split_index = max_radian_index;
        // split_index = (uint8_t)argmax(radian0, radian1, radian2, radian3);
    }
    split0 = split_index;
    split1 = (split0 + 2) % 4; // pts.size();
}

// Convenience overload: computes the corner signs itself.
__device__ void find_quadrilateral_split(const float2* pts, uint8_t& split0, uint8_t& split1)
{
    uint8_t pos_info = 0x00, neg_info = 0x00;
    calc_quadrilateral_signs(pts, pos_info, neg_info);
    find_quadrilateral_split(pts, pos_info, neg_info, split0, split1);
}

// Collect the global indices of the four iso-vertices around 'edge', in the circular
// order dictated by the edge's bipolar orientation (so resulting triangles face outward).
__device__ void get_circular_vertices_by_edge(vertex_index_type* iso_vertex_indices,
                                              const voxel_edge_index_type edge, const uint3& index3D,
                                              const _VoxelInfo& vx_info,
                                              const _VoxelInfo* d_compact_voxel_info,
                                              const voxel_index1D_type* d_full_voxel_index_map,
                                              const uint3& num_voxels_dim)
{
    uint8_t iter = 0;
    for (auto circular_edge_iter : CircularEdgeRange(edge, vx_info.is_edge_ccw(edge)))
    {
        uint3 circular_index3D;
        voxel_edge_index_type circular_edge;
        circular_edge_iter.retrieve(circular_index3D, circular_edge, index3D);
        voxel_index1D_type circular_index1D = index3D_to_1D(circular_index3D, num_voxels_dim);
        assert(d_full_voxel_index_map[circular_index1D] != INVALID_INDEX_1D);
        const _VoxelInfo& circular_vx_info = d_compact_voxel_info[d_full_voxel_index_map[circular_index1D]];
        iso_vertex_m_type circular_iso_vertex_m = circular_vx_info.iso_vertex_m_by_edge(circular_edge);
        assert(circular_iso_vertex_m != NO_VERTEX);
        vertex_index_type circular_iso_vertex_index = circular_vx_info.iso_vertex_index(circular_iso_vertex_m);
        iso_vertex_indices[iter] = circular_iso_vertex_index;
        ++iter;
    }
}

// Barycentric point-in-triangle test. Outputs the barycentric coordinates
// (alpha, beta, gamma) of 'pt' w.r.t. triangle (p0, p1, p2); returns true when
// 'pt' lies inside (with a small epsilon tolerance).
template <typename Vec>
__device__ bool is_inside_triangle(const Vec& p0, const Vec& p1, const Vec& p2, const Vec& pt,
                                   float& alpha, float& beta, float& gamma)
{
    Vec v0(p1 -
    p0), v1(p2 - p0), v2(pt - p0);
    float d00 = dot(v0, v0);
    float d10 = dot(v1, v0);
    float d11 = dot(v1, v1);
    float d20 = dot(v2, v0);
    float d21 = dot(v2, v1);
    // NOTE(review): no guard against a degenerate triangle (denominator == 0).
    float denom_inv = d00 * d11 - d10 * d10;
    denom_inv = 1.0f / denom_inv;
    beta = (d11 * d20 - d10 * d21) * denom_inv;
    gamma = (d00 * d21 - d10 * d20) * denom_inv;
    alpha = 1.0f - beta - gamma;
    return (-1e-4 < beta) && (-1e-4 < gamma) && (beta + gamma < 1.0 + 1e-4);
}

// One smoothing pass: for every bipolar local edge, project the four surrounding
// iso-vertices onto the edge's perpendicular plane, split the quadrilateral into two
// triangles, and move the edge-intersection vertex to the barycentric image of the
// edge's plane point inside whichever triangle contains it.
// Requires blockDim.x * sizeof(float2) * 4 bytes of dynamic shared memory.
__global__ void smooth_edge_vertices(float3* d_vertices, const _VoxelInfo* d_compact_voxel_info,
                                     const unsigned compact_size,
                                     const voxel_index1D_type* d_full_voxel_index_map,
                                     const float3 xyz_min, const float3 xyz_range,
                                     const uint3 num_voxels_dim)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    extern __shared__ float2 sh_projected_vertex_pos[];
    // Each thread owns a private 4-slot scratch region; no barrier needed.
    float2* projected_vertex_pos = sh_projected_vertex_pos + threadIdx.x * 4;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(vx_info.index1D(), num_voxels_dim);

    for (uint8_t edge_iter = 0; edge_iter < VOXEL_NUM_LOCAL_EDGES; ++edge_iter)
    {
        // voxel_edge_index_type edge = voxel_local_edges[edge_iter];
        voxel_edge_index_type edge = tex1Dfetch(voxel_local_edges_tex, edge_iter);
        if ((!vx_info.is_edge_bipolar(edge)) ||
            circular_edge_exceed_boundary(edge, index3D, num_voxels_dim))
        {
            continue;
        }
        vertex_index_type iso_vertex_indices[4] = {INVALID_UINT8, INVALID_UINT8, INVALID_UINT8, INVALID_UINT8};
        get_circular_vertices_by_edge(iso_vertex_indices, edge, index3D, vx_info,
                                      d_compact_voxel_info, d_full_voxel_index_map, num_voxels_dim);
        project_vertices_by_shared_edge(projected_vertex_pos, edge, iso_vertex_indices, d_vertices);
        uint8_t pos_info = 0x00, neg_info = 0x00;
        calc_quadrilateral_signs(projected_vertex_pos, pos_info, neg_info);
        // Self-intersecting quadrilaterals cannot be smoothed consistently; skip.
        if (is_quadrilateral_complex(pos_info, neg_info))
        {
            continue;
        }
        uint8_t split0 = INVALID_UINT8, split1 = INVALID_UINT8;
        find_quadrilateral_split(projected_vertex_pos, pos_info, neg_info, split0, split1);

        // 'origin' is this edge's position projected onto the same plane.
        float x1 = ijk_to_xyz(index3D.x + 1, num_voxels_dim.x, xyz_range.x, xyz_min.x);
        float y1 = ijk_to_xyz(index3D.y + 1, num_voxels_dim.y, xyz_range.y, xyz_min.y);
        float z1 = ijk_to_xyz(index3D.z + 1, num_voxels_dim.z, xyz_range.z, xyz_min.z);
        float2 origin;
        if (edge == 6) origin = make_float2(x1, y1);
        else if (edge == 9) origin = make_float2(x1, z1);
        else origin = make_float2(y1, z1);

        float alpha, beta, gamma;
        if (is_inside_triangle(projected_vertex_pos[split0], projected_vertex_pos[(split0 + 1) % 4],
                               projected_vertex_pos[split1], origin, alpha, beta, gamma))
        {
            float3& edge_vertex = d_vertices[vx_info.edge_vertex_index(edge)];
            edge_vertex = alpha * d_vertices[iso_vertex_indices[split0]];
            edge_vertex += beta * d_vertices[iso_vertex_indices[(split0 + 1) % 4]];
            edge_vertex += gamma * d_vertices[iso_vertex_indices[split1]];
        }
        else if (is_inside_triangle(projected_vertex_pos[split1], projected_vertex_pos[(split1 + 1) % 4],
                                    projected_vertex_pos[split0], origin, alpha, beta, gamma))
        {
            float3& edge_vertex = d_vertices[vx_info.edge_vertex_index(edge)];
            edge_vertex = alpha * d_vertices[iso_vertex_indices[split1]];
            edge_vertex += beta * d_vertices[iso_vertex_indices[(split1 + 1) % 4]];
            edge_vertex += gamma * d_vertices[iso_vertex_indices[split0]];
        }
    }
}

// Each interior bipolar local edge produces one quadrilateral = two triangles.
__global__ void calc_num_triangles_per_voxel_kern(unsigned* d_num_triangles,
                                                  const _VoxelInfo* d_compact_voxel_info,
                                                  const unsigned compact_size,
                                                  const uint3 num_voxels_dim)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(vx_info.index1D(), num_voxels_dim);
    uint8_t vx_num_triangles = 0;
    for (uint8_t edge_iter = 0; edge_iter < VOXEL_NUM_LOCAL_EDGES; ++edge_iter)
    {
        // voxel_edge_index_type edge = voxel_local_edges[edge_iter];
        voxel_edge_index_type edge = tex1Dfetch(voxel_local_edges_tex, edge_iter);
        if ((!vx_info.is_edge_bipolar(edge)) ||
            circular_edge_exceed_boundary(edge, index3D, num_voxels_dim))
        {
            continue;
        }
        vx_num_triangles += 2;
    }
    d_num_triangles[compact_index] = (unsigned)vx_num_triangles;
}

// Generate the actual triangles information of the mesh: for each interior bipolar
// local edge, emit the two triangles of the surrounding iso-vertex quadrilateral,
// written at this voxel's scanned offset in 'd_triangles'.
__global__ void generate_triangles_kern(uint3* d_triangles, const unsigned* d_triangles_scan,
                                        const _VoxelInfo* d_compact_voxel_info,
                                        const unsigned compact_size,
                                        const voxel_index1D_type* d_full_voxel_index_map,
                                        const uint3 num_voxels_dim)
{
    unsigned compact_index = __mul24(blockIdx.y, gridDim.x) + blockIdx.x;
    compact_index = __mul24(compact_index, blockDim.x) + threadIdx.x;
    if (compact_index >= compact_size) return;

    _VoxelInfo vx_info(d_compact_voxel_info[compact_index]);
    uint3 index3D = index1D_to_3D(vx_info.index1D(), num_voxels_dim);
    unsigned vx_triangle_index = d_triangles_scan[compact_index];
    for (uint8_t edge_iter = 0; edge_iter < VOXEL_NUM_LOCAL_EDGES; ++edge_iter)
    {
        // voxel_edge_index_type edge = voxel_local_edges[edge_iter];
        voxel_edge_index_type edge = tex1Dfetch(voxel_local_edges_tex, edge_iter);
        if ((!vx_info.is_edge_bipolar(edge)) ||
            circular_edge_exceed_boundary(edge, index3D, num_voxels_dim))
        {
            continue;
        }
        vertex_index_type iso_vertex_indices[4] = {INVALID_UINT8, INVALID_UINT8, INVALID_UINT8, INVALID_UINT8};
        get_circular_vertices_by_edge(iso_vertex_indices, edge, index3D, vx_info,
                                      d_compact_voxel_info, d_full_voxel_index_map, num_voxels_dim);
        // Quadrilateral (0,1,2,3) -> triangles (0,1,2) and (2,3,0).
        uint3 triangle = make_uint3(iso_vertex_indices[0], iso_vertex_indices[1], iso_vertex_indices[2]);
        d_triangles[vx_triangle_index] = triangle;
        ++vx_triangle_index;
        triangle = make_uint3(iso_vertex_indices[2], iso_vertex_indices[3], iso_vertex_indices[0]);
        d_triangles[vx_triangle_index] = triangle;
        ++vx_triangle_index;
    }
}

// Voxel grid has one fewer cell than scalar samples along each axis.
inline void get_num_voxels_dim_from_scalar_grid(uint3& num_voxels_dim, const scalar_grid_type& h_scalar_grid)
{
    num_voxels_dim.x = h_scalar_grid.dim_x() - 1;
    num_voxels_dim.y = h_scalar_grid.dim_y() - 1;
    num_voxels_dim.z = h_scalar_grid.dim_z() - 1;
}

// Unary functor for thrust: _VoxelInfo -> its stored vertex count.
class _VoxelInfoToNumVerticesUniOp
{
public:
    typedef _VoxelInfo argument_type;
    typedef unsigned result_type;

    __device__ result_type operator()(const argument_type& vx_info) const
    {
        return (unsigned)(vx_info.num_vertices());
    }
};

// Full Dual Marching Cubes pipeline on the GPU.
//   vertices/triangles : host outputs (cleared and refilled).
//   h_scalar_grid      : scalar field sampled on a (nx+1, ny+1, nz+1) grid.
//   xyz_min/xyz_max    : world-space bounds of the grid.
//   iso_value          : iso-surface level.
//   num_smooth         : number of edge-vertex smoothing iterations.
// Stages: flag active voxels -> compact -> init/correct voxel info -> count & scan
// vertices -> sample edge vertices -> place iso vertices -> smooth -> count & scan
// triangles -> generate triangles -> copy results back.
void run_dmc(std::vector<float3>& vertices, std::vector<uint3>& triangles,
             const scalar_grid_type& h_scalar_grid, const float3& xyz_min, const float3& xyz_max,
             float iso_value, unsigned num_smooth)
{
    uint3 num_voxels_dim;
    get_num_voxels_dim_from_scalar_grid(num_voxels_dim, h_scalar_grid);
    const size_t num_total_voxels = num_voxels_dim.x * num_voxels_dim.y * num_voxels_dim.z;

    // Upload the scalar field.
    float* d_scalar_grid;
    checkCudaErrors(cudaMalloc(&d_scalar_grid, sizeof(float) * h_scalar_grid.size()));
    checkCudaErrors(cudaMemcpy(d_scalar_grid, h_scalar_grid.data(),
                               sizeof(float) * h_scalar_grid.size(), cudaMemcpyHostToDevice));

    // One flag per voxel: crossed by the iso-surface or not.
    // NOTE(review): cudaMemset sizes with sizeof(unsigned) while the allocation uses
    // sizeof(flag_type) — only equivalent if flag_type is 4 bytes; verify.
    flag_type* d_voxel_flags;
    checkCudaErrors(cudaMalloc(&d_voxel_flags, sizeof(flag_type) * num_total_voxels));
    checkCudaErrors(cudaMemset(d_voxel_flags, 0, sizeof(unsigned) * num_total_voxels));

    dim3 threads_dim3(16, 16, 1);
    dim3 blocks_dim3((num_voxels_dim.x + threads_dim3.x - 1) / threads_dim3.x,
                     (num_voxels_dim.y + threads_dim3.y - 1) / threads_dim3.y,
                     (num_voxels_dim.z + threads_dim3.z - 1) / threads_dim3.z);
    launch_flag_active_voxels(d_voxel_flags, d_scalar_grid, num_voxels_dim, iso_value,
                              blocks_dim3, threads_dim3);
    // print_d_arr(d_voxel_flags, num_total_voxels, "voxel flag: ");
    size_t num_compact_voxels = launch_thrust_count(d_voxel_flags, num_total_voxels);

    // Exclusive scan of the flags gives each active voxel its compacted slot.
    unsigned* d_voxel_flags_scan;
    checkCudaErrors(cudaMalloc(&d_voxel_flags_scan, sizeof(unsigned) * num_total_voxels));
    checkCudaErrors(cudaMemset(d_voxel_flags_scan, 0, sizeof(unsigned) * num_total_voxels));
    launch_thrust_scan(d_voxel_flags_scan, d_voxel_flags, num_total_voxels);
    // print_d_arr(d_voxel_flags_scan, num_total_voxels, "flags scan: ");

    // thrust::device_vector<_VoxelInfo> d_compact_voxel_info_vec(num_compact_voxels);
    _VoxelInfo* d_compact_voxel_info; // = thrust::raw_pointer_cast(d_compact_voxel_info_vec.data());
    checkCudaErrors(cudaMalloc(&d_compact_voxel_info, sizeof(_VoxelInfo) * num_compact_voxels));
    checkCudaErrors(cudaMemset(d_compact_voxel_info, 0xff, sizeof(_VoxelInfo) * num_compact_voxels));

    // Map from full-grid voxel index to compacted index (0xff.. = inactive).
    voxel_index1D_type* d_full_voxel_index_map;
    checkCudaErrors(cudaMalloc(&d_full_voxel_index_map, sizeof(voxel_index1D_type) * num_total_voxels));
    checkCudaErrors(cudaMemset(d_full_voxel_index_map, 0xff, sizeof(voxel_index1D_type) * num_total_voxels));
    launch_compact_voxel_flags(d_compact_voxel_info, d_full_voxel_index_map, num_voxels_dim,
                               d_voxel_flags, d_voxel_flags_scan, num_total_voxels,
                               blocks_dim3, threads_dim3);
    // print_d_arr(d_full_voxel_index_map, num_total_voxels, "full voxel map: ");

    // Switch to a 1D launch over the compacted voxels; spill excess blocks into y.
    threads_dim3 = dim3(128, 1, 1);
    blocks_dim3 = dim3((num_compact_voxels + 127) / 128, 1, 1);
    while (blocks_dim3.x > 32768)
    {
        blocks_dim3.x /= 2;
        blocks_dim3.y *= 2;
    }

    checkCudaErrors(cudaFree(d_voxel_flags));
    checkCudaErrors(cudaFree(d_voxel_flags_scan));

    launch_init_voxels_info(d_compact_voxel_info, num_compact_voxels, d_scalar_grid,
                            num_voxels_dim, iso_value, blocks_dim3, threads_dim3);
    launch_correct_voxels_info(d_compact_voxel_info, num_compact_voxels, d_full_voxel_index_map,
                               num_voxels_dim, blocks_dim3, threads_dim3);
    launch_calc_num_vertices_per_voxel(d_compact_voxel_info, num_compact_voxels,
                                       blocks_dim3, threads_dim3);
    // print_d_arr(d_compact_voxel_info, num_compact_voxels, "vx_info: ");

    // Total vertex count = sum of per-voxel counts.
    unsigned num_vertices = thrust::transform_reduce(thrust::device, d_compact_voxel_info,
                                                     d_compact_voxel_info + num_compact_voxels,
                                                     _VoxelInfoToNumVerticesUniOp(),
                                                     0, thrust::plus<unsigned>());
    unsigned* d_vertices_begin_scan;
    checkCudaErrors(cudaMalloc(&d_vertices_begin_scan, sizeof(unsigned) * num_compact_voxels));
    checkCudaErrors(cudaMemset(d_vertices_begin_scan, 0x00, sizeof(unsigned) * num_compact_voxels));
    launch_thrust_transform_scan(d_vertices_begin_scan, d_compact_voxel_info, num_compact_voxels,
                                 _VoxelInfoToNumVerticesUniOp());
    launch_set_vertices_begin(d_compact_voxel_info, d_vertices_begin_scan, num_compact_voxels,
                              blocks_dim3, threads_dim3);
    // print_d_arr(d_vertices_begin_scan, num_compact_voxels, "vertices begin: ");
    checkCudaErrors(cudaFree(d_vertices_begin_scan));
    // print_d_arr(d_compact_voxel_info, num_compact_voxels, "vx_info: ");

    float3* d_vertices;
    checkCudaErrors(cudaMalloc(&d_vertices, sizeof(float3) * num_vertices));

    float3 xyz_range = xyz_max - xyz_min;
    sample_edge_intersection_vertices_kern<<<blocks_dim3, threads_dim3>>>(d_vertices,
        d_compact_voxel_info, num_compact_voxels, d_scalar_grid, num_voxels_dim,
        xyz_min, xyz_range, iso_value);
    checkCudaErrors(cudaFree(d_scalar_grid));

    calc_iso_vertices_kern<<<blocks_dim3, threads_dim3, threads_dim3.x * sizeof(_VoxelInfo)>>>(
        d_vertices, d_compact_voxel_info, num_compact_voxels, d_full_voxel_index_map, num_voxels_dim);

    // Alternate smoothing of edge vertices and re-placement of iso vertices.
    for (unsigned smooth_iter = 0; smooth_iter < num_smooth; ++ smooth_iter)
    {
        smooth_edge_vertices<<<blocks_dim3, threads_dim3, threads_dim3.x * sizeof(float2) * 4>>>(
            d_vertices, d_compact_voxel_info, num_compact_voxels, d_full_voxel_index_map,
            xyz_min, xyz_range, num_voxels_dim);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());

        calc_iso_vertices_kern<<<blocks_dim3, threads_dim3, threads_dim3.x * sizeof(_VoxelInfo)>>>(
            d_vertices, d_compact_voxel_info, num_compact_voxels, d_full_voxel_index_map,
            num_voxels_dim);
        cudaDeviceSynchronize();
        checkCudaErrors(cudaGetLastError());

        std::cout << "done for smooth iteration: " << smooth_iter << std::endl;
    }
    // print_d_arr(d_vertices, num_vertices, "all vertices: ");

    unsigned* d_num_triangles;
    checkCudaErrors(cudaMalloc(&d_num_triangles, sizeof(unsigned) * num_compact_voxels));
    checkCudaErrors(cudaMemset(d_num_triangles, 0, sizeof(unsigned) * num_compact_voxels));
    calc_num_triangles_per_voxel_kern<<<blocks_dim3, threads_dim3>>>(d_num_triangles,
        d_compact_voxel_info, num_compact_voxels, num_voxels_dim);
    // NOTE(review): assumes launch_thrust_count sums the per-voxel triangle counts
    // (not merely counting non-zero entries) — verify against its definition.
    size_t num_triangles = launch_thrust_count(d_num_triangles, num_compact_voxels);

    unsigned* d_triangles_scan;
    checkCudaErrors(cudaMalloc(&d_triangles_scan, sizeof(unsigned) * num_compact_voxels));
    checkCudaErrors(cudaMemset(d_triangles_scan, 0, sizeof(unsigned) * num_compact_voxels));
    launch_thrust_scan(d_triangles_scan, d_num_triangles, num_compact_voxels);
    checkCudaErrors(cudaFree(d_num_triangles));

    uint3* d_triangles;
    checkCudaErrors(cudaMalloc(&d_triangles, sizeof(uint3) * num_triangles));
    checkCudaErrors(cudaMemset(d_triangles, 0xff, sizeof(uint3) * num_triangles));
    generate_triangles_kern<<<blocks_dim3, threads_dim3>>>(d_triangles, d_triangles_scan,
        d_compact_voxel_info, num_compact_voxels, d_full_voxel_index_map, num_voxels_dim);
    // print_d_arr(d_triangles, num_triangles, "all triangles: ");

    checkCudaErrors(cudaFree(d_compact_voxel_info));
    checkCudaErrors(cudaFree(d_full_voxel_index_map));
    checkCudaErrors(cudaFree(d_triangles_scan));

    // Copy results back to the host (blocking copies also synchronize the device).
    vertices.clear();
    triangles.clear();
    vertices.resize(num_vertices);
    checkCudaErrors(cudaMemcpy(vertices.data(), d_vertices, sizeof(float3) * num_vertices,
                               cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_vertices));
    triangles.resize(num_triangles);
    checkCudaErrors(cudaMemcpy(triangles.data(), d_triangles, sizeof(uint3) * num_triangles,
                               cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(d_triangles));

    std::cout << "Dual Marching Cubes done!"
<< std::endl; } }; // namespace dmc /* class Isosurface { public: virtual ~Isosurface() = default; virtual float value(float x, float y, float z) const = 0; }; class SphereSurface : public Isosurface { public: float value(float x, float y, float z) const override { return sqrtf(x * x + y * y + z * z); } }; class GyroidSurface : public Isosurface { public: float value(float x, float y, float z) const override { return 2.0 * (cosf(x) * sinf(y) + cosf(y) * sinf(z) + cosf(z) * sinf(x)); } }; void dump_obj(const char* filename, const std::vector<float3>& compact_vertices, const std::vector<uint3>& compact_triangles) { std::ofstream of(filename); for (const auto& v : compact_vertices) of << "v " << v.x << " " << v.y << " " << v.z << std::endl; for (const auto& t: compact_triangles) of << "f " << t.x + 1 << " " << t.y + 1 << " " << t.z + 1 << std::endl; } void test_dmc() { using namespace utils; using namespace dmc; iso_vertex_m_type* d_config_edge_lut1, * d_config_edge_lut2; uint8_t* d_num_vertex_lut1, * d_num_vertex_lut2; voxel_config_type* d_config_2B_3B_lut; voxel_face_index_type* d_config_2B_3B_ambiguous_face; voxel_face_index_type* d_opposite_face_lut; check_dir_type* d_face_to_check_dir_lut; uint8_t* d_edge_belonged_voxel_lut; voxel_edge_index_type* d_circular_edge_lut; voxel_edge_index_type* d_voxel_local_edges; setup_device_luts(&d_config_edge_lut1, &d_config_edge_lut2, &d_num_vertex_lut1, &d_num_vertex_lut2, &d_config_2B_3B_lut, &d_config_2B_3B_ambiguous_face, &d_opposite_face_lut, &d_face_to_check_dir_lut, &d_edge_belonged_voxel_lut, &d_circular_edge_lut, &d_voxel_local_edges); SphereSurface surface; // GyroidSurface surface; float3 xyz_min = make_float3(-5, -5, -5); float3 xyz_max = make_float3(5, 5, 5); float3 xyz_range = xyz_max - xyz_min; float iso_value = 4.1f; unsigned resolution = 20; Array3D<float> scalar_grid(resolution + 1, resolution + 1, resolution + 1); for (unsigned k = 0; k < scalar_grid.dim_z(); ++k) { float z = ijk_to_xyz(k, resolution, 
xyz_range.z, xyz_min.z); for (unsigned j = 0; j < scalar_grid.dim_y(); ++j) { float y = ijk_to_xyz(j, resolution, xyz_range.y, xyz_min.y); for (unsigned i = 0; i < scalar_grid.dim_x(); ++i) { float x = ijk_to_xyz(i, resolution, xyz_range.x, xyz_min.x); scalar_grid(i, j, k) = surface.value(x, y, z); } } } std::vector<float3> compact_vertices; std::vector<uint3> compact_triangles; dmc::run_dmc(compact_vertices, compact_triangles, scalar_grid, xyz_min, xyz_max, iso_value, 15); dump_obj("sphere.obj", compact_vertices, compact_triangles); for (const auto& vertex : compact_vertices) { std::cout << "v " << vertex.x << " " << vertex.y << " " << vertex.z << std::endl; } for (const auto& tri : compact_triangles) { std::cout << "f " << tri.x+1 << " " << tri.y+1 << " " << tri.z+1 << std::endl; } } int main() { test_dmc(); return 0; } */
79c252a85e4e57f9e8ba3b641d8c0021b17e5196.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHTensorMathCompareT.cuh> #include <THH/THHTensor.hpp> #include <THH/generic/THHTensorMathCompareT.hip> #include <THH/THHGenerateLongType.h>
79c252a85e4e57f9e8ba3b641d8c0021b17e5196.cu
#include <THC/THCTensorMathCompareT.cuh> #include <THC/THCTensor.hpp> #include <THC/generic/THCTensorMathCompareT.cu> #include <THC/THCGenerateLongType.h>
c2e4c23b2cb7dee78ddff70b7983066454f5179d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> #include <vector> #include <iomanip> #include <sstream> #include <string> #include <fstream> #include <thread> #include <ctime> #include <stdio.h> #define BLOCK_SIZE (128) #define WORK_SIZE_BITS 16 #define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE)) #define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__) inline void gpuAssert(hipError_t code, const char *file, int line) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", hipGetErrorString(code), code, file, line); exit(code); } } __device__ uint64_t hardcoded = 8682522807148012UL * 181783497276652981UL; __device__ int binarySearch(int64_t* values, int64_t value, int start, int end){ int low = 0; int high = end - 1; int mid = 0; if(high < value || low > value) return -1; while(low <= high){ mid = low + ((high - low)/2); if(values[mid] > value) high = mid - 1; else if((values[mid] < value)) low = mid - 1; else return mid; } return -1; } /*__global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(int64_t* values, int size, uint64_t offset, uint32_t* counter, uint64_t* buffer){ int64_t Time = (blockIdx.x * blockDim.x + threadIdx.x) + offset; int64_t scrambledTime = hardcoded ^ Time; if(binarySearch(values, scrambledTime, 0, size) != -1){ buffer[atomicAdd(counter, 1)] = Time; return; } }*/ __global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(int64_t* values, int size, uint64_t offset, uint32_t* counter, uint64_t* buffer){ int64_t Time = (blockIdx.x * blockDim.x + threadIdx.x) + offset; int64_t scrambledTime = hardcoded ^ Time; if(binarySearch(values, scrambledTime, 0, size) != -1){ buffer[atomicAdd(counter, 1)] = Time; return; } } uint64_t* buffer; uint32_t* counter; std::vector<int64_t> structureSeeds; int64_t* structSeedsArr; int main(int argc, char **argv ){ time_t start = time(NULL); FILE* fp = 
fopen("seananners-middlestep.txt", "w+"); std::fstream infile; infile.open("seananners.txt", std::ios::in); std::string line; while(std::getline(infile, line)){ int64_t structureSeed = 0; std::istringstream iss(line); if(!(iss >> structureSeed)){break;} structureSeeds.push_back(structureSeed); } infile.close(); double seconds_per_structure_seed = 0.0; std::vector<std::thread> threads; int thread = 0; int curr = 0; uint64_t startValue = 0; uint64_t total = 281474976710656; int tmpCount = 0; int tmpSize = structureSeeds.size(); GPU_ASSERT(hipMallocManaged(&buffer, sizeof(uint64_t) * SEEDS_PER_CALL)); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipMallocManaged(&counter, sizeof(uint32_t))); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipMallocManaged(&structSeedsArr, sizeof(uint64_t) * tmpSize)); GPU_ASSERT(hipPeekAtLastError()); printf("test1\n"); for(int i = 0; i <= structureSeeds.size(); i++){ structSeedsArr[i] = structureSeeds[i]; } printf("test2\n"); printf("test3\n"); hipSetDevice(0); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); uint64_t countOut = 0; uint64_t tempCount; printf("test4\n"); for(uint64_t offset = startValue; offset <= total; offset += SEEDS_PER_CALL){ hipLaunchKernelGGL(( threadWork), dim3(1ULL<<WORK_SIZE_BITS),dim3(BLOCK_SIZE), 0, 0, 0, 0, structSeedsArr, tmpSize, offset, counter, buffer); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); for(int i = 0; i < *counter; i++){ uint64_t seed = buffer[i]; if(seed != 0) fprintf(fp, "%lld\n", seed); } *counter = 0; if(countOut >= 100000000000){ time_t tempTime = time(NULL); uint64_t tempDiff = tempTime - start; uint64_t sps = (uint64_t)(offset - startValue)/tempDiff; double percent = ((double)offset/(double)total) * 100.0; printf("Seeds Per Second: %lld\tProgress: %f\n", sps, percent); countOut = 0; } countOut += SEEDS_PER_CALL; } time_t end = time(NULL); uint64_t diff = end - start; double seedsPerSec = (double)total/(double)diff; printf("Time taken: 
%lld\nSeeds per second: %15.9f", diff, seedsPerSec); fclose(fp); return 0; }
c2e4c23b2cb7dee78ddff70b7983066454f5179d.cu
#include <iostream> #include <math.h> #include <vector> #include <iomanip> #include <sstream> #include <string> #include <fstream> #include <thread> #include <ctime> #include <stdio.h> #define BLOCK_SIZE (128) #define WORK_SIZE_BITS 16 #define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE)) #define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__) inline void gpuAssert(cudaError_t code, const char *file, int line) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line); exit(code); } } __device__ uint64_t hardcoded = 8682522807148012UL * 181783497276652981UL; __device__ int binarySearch(int64_t* values, int64_t value, int start, int end){ int low = 0; int high = end - 1; int mid = 0; if(high < value || low > value) return -1; while(low <= high){ mid = low + ((high - low)/2); if(values[mid] > value) high = mid - 1; else if((values[mid] < value)) low = mid - 1; else return mid; } return -1; } /*__global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(int64_t* values, int size, uint64_t offset, uint32_t* counter, uint64_t* buffer){ int64_t Time = (blockIdx.x * blockDim.x + threadIdx.x) + offset; int64_t scrambledTime = hardcoded ^ Time; if(binarySearch(values, scrambledTime, 0, size) != -1){ buffer[atomicAdd(counter, 1)] = Time; return; } }*/ __global__ __launch_bounds__(BLOCK_SIZE,2) static void threadWork(int64_t* values, int size, uint64_t offset, uint32_t* counter, uint64_t* buffer){ int64_t Time = (blockIdx.x * blockDim.x + threadIdx.x) + offset; int64_t scrambledTime = hardcoded ^ Time; if(binarySearch(values, scrambledTime, 0, size) != -1){ buffer[atomicAdd(counter, 1)] = Time; return; } } uint64_t* buffer; uint32_t* counter; std::vector<int64_t> structureSeeds; int64_t* structSeedsArr; int main(int argc, char **argv ){ time_t start = time(NULL); FILE* fp = fopen("seananners-middlestep.txt", "w+"); std::fstream infile; infile.open("seananners.txt", std::ios::in); 
std::string line; while(std::getline(infile, line)){ int64_t structureSeed = 0; std::istringstream iss(line); if(!(iss >> structureSeed)){break;} structureSeeds.push_back(structureSeed); } infile.close(); double seconds_per_structure_seed = 0.0; std::vector<std::thread> threads; int thread = 0; int curr = 0; uint64_t startValue = 0; uint64_t total = 281474976710656; int tmpCount = 0; int tmpSize = structureSeeds.size(); GPU_ASSERT(cudaMallocManaged(&buffer, sizeof(uint64_t) * SEEDS_PER_CALL)); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaMallocManaged(&counter, sizeof(uint32_t))); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaMallocManaged(&structSeedsArr, sizeof(uint64_t) * tmpSize)); GPU_ASSERT(cudaPeekAtLastError()); printf("test1\n"); for(int i = 0; i <= structureSeeds.size(); i++){ structSeedsArr[i] = structureSeeds[i]; } printf("test2\n"); printf("test3\n"); cudaSetDevice(0); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); uint64_t countOut = 0; uint64_t tempCount; printf("test4\n"); for(uint64_t offset = startValue; offset <= total; offset += SEEDS_PER_CALL){ threadWork<<<1ULL<<WORK_SIZE_BITS,BLOCK_SIZE>>>(structSeedsArr, tmpSize, offset, counter, buffer); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); for(int i = 0; i < *counter; i++){ uint64_t seed = buffer[i]; if(seed != 0) fprintf(fp, "%lld\n", seed); } *counter = 0; if(countOut >= 100000000000){ time_t tempTime = time(NULL); uint64_t tempDiff = tempTime - start; uint64_t sps = (uint64_t)(offset - startValue)/tempDiff; double percent = ((double)offset/(double)total) * 100.0; printf("Seeds Per Second: %lld\tProgress: %f\n", sps, percent); countOut = 0; } countOut += SEEDS_PER_CALL; } time_t end = time(NULL); uint64_t diff = end - start; double seedsPerSec = (double)total/(double)diff; printf("Time taken: %lld\nSeeds per second: %15.9f", diff, seedsPerSec); fclose(fp); return 0; }
f55712fe55ca8f6281746ff64e496e0692e5b21a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ // Define your kernels in this file you may use more than one kernel if you // need to // INSERT KERNEL(S) HERE __global__ void histo_kernel(unsigned int *buffer, long size, unsigned int *histo, unsigned int num_bins) { extern __shared__ unsigned int histo_private[]; unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; // stride is total number of threads unsigned int stride = blockDim.x * gridDim.x; // All threads handle blockDim.x * gridDim.x // consecutive elements //inititialize private histogram for (int j = 0; j < (num_bins-1)/blockDim.x+1; ++j) if (blockDim.x*j+threadIdx.x<num_bins) histo_private[blockDim.x*j+threadIdx.x]=0; __syncthreads(); //populate private histogram while (i < size) { atomicAdd(&(histo_private[buffer[i]]), 1); i += stride; } __syncthreads(); //Transfer data from shared memories to global memory for (int k = 0; k < (num_bins-1)/blockDim.x+1; ++k) if (blockDim.x*k+threadIdx.x<num_bins) atomicAdd(&(histo[blockDim.x*k+threadIdx.x]), histo_private[blockDim.x*k+threadIdx.x]); } /****************************************************************************** Setup and invoke your kernel(s) in this function. You may also allocate more GPU memory if you need to *******************************************************************************/ void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) { // INSERT CODE HERE const int BLOCK_SIZE = 512; hipLaunchKernelGGL(( histo_kernel), dim3((num_elements-1)/BLOCK_SIZE+1),dim3(BLOCK_SIZE),num_bins*sizeof(unsigned int), 0, input,num_elements,bins,num_bins); }
f55712fe55ca8f6281746ff64e496e0692e5b21a.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ // Define your kernels in this file you may use more than one kernel if you // need to // INSERT KERNEL(S) HERE __global__ void histo_kernel(unsigned int *buffer, long size, unsigned int *histo, unsigned int num_bins) { extern __shared__ unsigned int histo_private[]; unsigned int i = threadIdx.x + blockIdx.x * blockDim.x; // stride is total number of threads unsigned int stride = blockDim.x * gridDim.x; // All threads handle blockDim.x * gridDim.x // consecutive elements //inititialize private histogram for (int j = 0; j < (num_bins-1)/blockDim.x+1; ++j) if (blockDim.x*j+threadIdx.x<num_bins) histo_private[blockDim.x*j+threadIdx.x]=0; __syncthreads(); //populate private histogram while (i < size) { atomicAdd(&(histo_private[buffer[i]]), 1); i += stride; } __syncthreads(); //Transfer data from shared memories to global memory for (int k = 0; k < (num_bins-1)/blockDim.x+1; ++k) if (blockDim.x*k+threadIdx.x<num_bins) atomicAdd(&(histo[blockDim.x*k+threadIdx.x]), histo_private[blockDim.x*k+threadIdx.x]); } /****************************************************************************** Setup and invoke your kernel(s) in this function. You may also allocate more GPU memory if you need to *******************************************************************************/ void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) { // INSERT CODE HERE const int BLOCK_SIZE = 512; histo_kernel<<<(num_elements-1)/BLOCK_SIZE+1,BLOCK_SIZE,num_bins*sizeof(unsigned int)>>>(input,num_elements,bins,num_bins); }
minusKernel.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void minusKernel(float* A, int size){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size){ A[id] = 1 - A[id]; } }
minusKernel.cu
#include "includes.h" __global__ void minusKernel(float* A, int size){ int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size){ A[id] = 1 - A[id]; } }
35ca7a6d6c81d643a176111f022d91e3edbe6388.hip
// !!! This is a file automatically generated by hipify!!! #include <THH/THHTensorRandom.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHGeneral.h> #include <THH/THHTensorCopy.h> #include <THH/THHTensorMath.h> #include <THH/THHReduceApplyUtils.cuh> #include <THH/THHTensorRandom.cuh> #include <ATen/Config.h> #include <thrust/functional.h> #define MAX_NUM_BLOCKS 64 #define BLOCK_SIZE 256 // NB: ROCm compiler seems to have a bug where __host__ functions must be // explicitly specified extern "C" otherwise ROCm compiler doesn't respect it. // See https://github.com/RadeonOpenCompute/hcc/issues/839 extern "C" __host__ void THCRandom_getRNGState(at::Generator *gen_, THByteTensor *rng_state) { auto gen = at::check_generator<at::CUDAGenerator>(gen_); std::lock_guard<std::mutex> lock(gen->mutex_); // The RNG state comprises the seed, and an offset used for Philox. // The following line is just here for BC reason. sizeof hiprandStateMtgp32_t is 4120. // It used to be static const size_t states_size = MAX_NUM_BLOCKS * sizeof(hiprandStateMtgp32_t); // MAX_NUM_BLOCKS was 200 and sizeof(hiprandStateMtgp32_t) is 4120. Hardcoding these numbers here // because this is just host side code and we don't want to worry about linking with cuda static const size_t states_size = 200 * sizeof(4120); static const size_t seed_size = sizeof(uint64_t); static const size_t offset_size = sizeof(int64_t); static const size_t total_size = states_size + seed_size + offset_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); // since curandStateMTGP is not used anymore, fill gen_states of THCGenerator with deterministic garbage value of -1 // gen_states in THCGenerator struct was an array of curandStateMtgp32s. 
memset(THByteTensor_data(rng_state), -1, states_size); auto current_seed = gen->current_seed(); auto offset = static_cast<int64_t>(gen->philox_offset_per_thread()); // Note that old THCGeneratorState had offset as std::atomic<int64_t> memcpy(THByteTensor_data(rng_state) + states_size, &current_seed, seed_size); memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &offset, offset_size); } extern "C" __host__ void THCRandom_setRNGState(at::Generator *gen_, THByteTensor *rng_state) { auto gen = at::check_generator<at::CUDAGenerator>(gen_); std::lock_guard<std::mutex> lock(gen->mutex_); static const size_t states_size = 200 * sizeof(4120); // this line is just here for BC reason static const size_t seed_size = sizeof(uint64_t); static const size_t offset_size = sizeof(int64_t); static const size_t total_size = states_size + seed_size + offset_size; bool no_philox_seed = false; if (THByteTensor_nElement(rng_state) == total_size - offset_size) { no_philox_seed = true; } else { THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); } THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); uint64_t input_seed; memcpy(&input_seed, THByteTensor_data(rng_state) + states_size, seed_size); gen->set_current_seed(input_seed); int64_t philox_offset = 0; if (!no_philox_seed) { memcpy(&philox_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size); } gen->set_philox_offset_per_thread(static_cast<uint64_t>(philox_offset)); } #include <THH/generic/THHTensorRandom.hip> #include <THH/THHGenerateAllTypes.h> #include <THH/generic/THHTensorRandom.hip> #include <THH/THHGenerateBoolType.h>
35ca7a6d6c81d643a176111f022d91e3edbe6388.cu
#include <THC/THCTensorRandom.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCGeneral.h> #include <THC/THCTensorCopy.h> #include <THC/THCTensorMath.h> #include <THC/THCReduceApplyUtils.cuh> #include <THC/THCTensorRandom.cuh> #include <ATen/Config.h> #include <thrust/functional.h> #define MAX_NUM_BLOCKS 200 #define BLOCK_SIZE 256 // NB: ROCm compiler seems to have a bug where __host__ functions must be // explicitly specified extern "C" otherwise ROCm compiler doesn't respect it. // See https://github.com/RadeonOpenCompute/hcc/issues/839 extern "C" __host__ void THCRandom_getRNGState(at::Generator *gen_, THByteTensor *rng_state) { auto gen = at::check_generator<at::CUDAGenerator>(gen_); std::lock_guard<std::mutex> lock(gen->mutex_); // The RNG state comprises the seed, and an offset used for Philox. // The following line is just here for BC reason. sizeof curandStateMtgp32 is 4120. // It used to be static const size_t states_size = MAX_NUM_BLOCKS * sizeof(curandStateMtgp32); // MAX_NUM_BLOCKS was 200 and sizeof(curandStateMtgp32) is 4120. Hardcoding these numbers here // because this is just host side code and we don't want to worry about linking with cuda static const size_t states_size = 200 * sizeof(4120); static const size_t seed_size = sizeof(uint64_t); static const size_t offset_size = sizeof(int64_t); static const size_t total_size = states_size + seed_size + offset_size; THByteTensor_resize1d(rng_state, total_size); THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); // since curandStateMTGP is not used anymore, fill gen_states of THCGenerator with deterministic garbage value of -1 // gen_states in THCGenerator struct was an array of curandStateMtgp32s. 
memset(THByteTensor_data(rng_state), -1, states_size); auto current_seed = gen->current_seed(); auto offset = static_cast<int64_t>(gen->philox_offset_per_thread()); // Note that old THCGeneratorState had offset as std::atomic<int64_t> memcpy(THByteTensor_data(rng_state) + states_size, &current_seed, seed_size); memcpy(THByteTensor_data(rng_state) + states_size + seed_size, &offset, offset_size); } extern "C" __host__ void THCRandom_setRNGState(at::Generator *gen_, THByteTensor *rng_state) { auto gen = at::check_generator<at::CUDAGenerator>(gen_); std::lock_guard<std::mutex> lock(gen->mutex_); static const size_t states_size = 200 * sizeof(4120); // this line is just here for BC reason static const size_t seed_size = sizeof(uint64_t); static const size_t offset_size = sizeof(int64_t); static const size_t total_size = states_size + seed_size + offset_size; bool no_philox_seed = false; if (THByteTensor_nElement(rng_state) == total_size - offset_size) { no_philox_seed = true; } else { THArgCheck(THByteTensor_nElement(rng_state) == total_size, 1, "RNG state is wrong size"); } THArgCheck(THByteTensor_isContiguous(rng_state), 1, "RNG state must be contiguous"); uint64_t input_seed; memcpy(&input_seed, THByteTensor_data(rng_state) + states_size, seed_size); gen->set_current_seed(input_seed); int64_t philox_offset = 0; if (!no_philox_seed) { memcpy(&philox_offset, THByteTensor_data(rng_state) + states_size + seed_size, offset_size); } gen->set_philox_offset_per_thread(static_cast<uint64_t>(philox_offset)); } #include <THC/generic/THCTensorRandom.cu> #include <THC/THCGenerateAllTypes.h> #include <THC/generic/THCTensorRandom.cu> #include <THC/THCGenerateBoolType.h>
6e6f0eeb63f2e694226cf1652c30cb33aed27377.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_vector_types.h> #include "device_launch_parameters.h" #include "cutil_math.h" #define M_PI 3.14159265359f #define width 512 #define height 384 #define samples 4096 struct Ray { float3 origin; float3 direction; __device__ Ray(float3 o, float3 d) : origin(o), direction(d) {} }; enum Material { Diffuse, Specular, Refraction }; struct Sphere { float radius; float3 position, emission, color; Material material; __device__ float intersect_sphere(const Ray &r) const { float3 op = r.origin - position; float t, epsilon = 0.0001f; float b = dot(op, r.direction); float discriminant = b * b - dot(op, op) + radius * radius; if (discriminant < 0) return 0; else discriminant = sqrtf(discriminant); t = -b - discriminant; if (t > epsilon) return t; else { t = -b + discriminant; if (t > epsilon) return t; else return 0; } } }; // Scene __constant__ Sphere spheres[] = { { 1e5f,{ 1e5f + 1.0f, 40.8f, 81.6f },{ 0.0f, 0.0f, 0.0f },{ 0.75f, 0.25f, 0.25f }, Diffuse }, //Left { 1e5f,{ -1e5f + 99.0f, 40.8f, 81.6f },{ 0.0f, 0.0f, 0.0f },{ .25f, .25f, .75f }, Diffuse }, //Rght { 1e5f,{ 50.0f, 40.8f, 1e5f },{ 0.0f, 0.0f, 0.0f },{ .75f, .75f, .75f }, Diffuse }, //Back { 1e5f,{ 50.0f, 40.8f, -1e5f + 600.0f },{ 0.0f, 0.0f, 0.0f },{ 1.00f, 1.00f, 1.00f }, Diffuse }, //Frnt { 1e5f,{ 50.0f, 1e5f, 81.6f },{ 0.0f, 0.0f, 0.0f },{ .75f, .75f, .75f }, Diffuse }, //Botm { 1e5f,{ 50.0f, -1e5f + 81.6f, 81.6f },{ 0.0f, 0.0f, 0.0f },{ .75f, .75f, .75f }, Diffuse }, //Top { 16.5f,{ 27.0f, 16.5f, 47.0f },{ 0.0f, 0.0f, 0.0f },{ 1.0f, 1.0f, 1.0f }, Specular}, // small sphere 1 { 16.5f,{ 73.0f, 16.5f, 78.0f },{ 0.0f, 0.0f, 0.0f },{ 1.0f, 1.0f, 1.0f }, Refraction }, // small sphere 2 //{ 16.5f,{ 50.0f, 16.5f, 90.0f}, { 0.0f, 0.0f, 0.0f },{ 1.0f, 1.0f, 1.0f }, Refraction }, //small sphere 3 { 600.0f,{ 50.0f, 681.6f - .77f, 81.6f },{ 2.0f, 1.8f, 1.6f },{ 0.0f, 0.0f, 0.0f }, Diffuse } // Light }; 
__device__ inline bool intersect_scene(const Ray &r, float &t, int &id) { float n = sizeof(spheres) / sizeof(Sphere), d, inf = t = 1e20; for (int i = int(n); i--;) if ((d = spheres[i].intersect_sphere(r)) && d < t) { t = d; id = i; } return t < inf; } __device__ static float getrandom(unsigned int *seed0, unsigned int *seed1) { *seed0 = 36969 * ((*seed0) & 65535) + ((*seed0) >> 16); *seed1 = 18000 * ((*seed1) & 65535) + ((*seed1) >> 16); unsigned int ires = ((*seed0) << 16) + (*seed1); union { float f; unsigned int ui; } res; res.ui = (ires & 0x007fffff) | 0x40000000; return (res.f - 2.f) / 2.f; } __device__ float3 radiance(Ray &r, unsigned int *s1, unsigned int *s2) { float3 accucolor = make_float3(0.0f, 0.0f, 0.0f); float3 mask = make_float3(1.0f, 1.0f, 1.0f); // ray bounce loop no recursionin device for (int bounces = 0; bounces < 8; bounces++) { float t; int id = 0; //miss if (!intersect_scene(r, t, id)) return make_float3(0.0f, 0.0f, 0.0f); const Sphere &obj = spheres[id]; float3 x = r.origin + r.direction*t; float3 n = normalize(x - obj.position); float3 nl = dot(n, r.direction) < 0 ? n : n * -1; //emissive accucolor += mask * obj.emission; //diffuse if (obj.material == Diffuse) { float r1 = 2 * M_PI * getrandom(s1, s2); float r2 = getrandom(s1, s2); float r2s = sqrtf(r2); float3 w = nl; float3 u = normalize(cross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = cross(w, u); float3 d = normalize(u*cos(r1)*r2s + v * sin(r1)*r2s + w * sqrtf(1 - r2)); r.origin = x + nl * 0.05f; //offset for self intersection r.direction = d; mask *= obj.color; mask *= dot(d, nl); // weigh light contribution using cosine of angle between incident light and normal mask *= 2; // fudge factor } //specular else if (obj.material == Specular) { //r.origin = x + nl * 0.07f; r.direction = r.direction - n * 2 * dot(n, r.direction); r.origin = x + r.direction * 0.07f; mask *= obj.color; mask *= dot(r.direction, nl); mask *= 2; } //refraction else { double n1, n2, n3; double cosI = dot(n, r.direction); if (cosI > 0.0) { n1 = 1.5; n2 = 1.0; n = -n; } else { n1 = 1.0; n2 = 1.5; cosI = -cosI; } n3 = n1 / n2; double sinT2 = n3 * n3*(1.0 - cosI * cosI); double cosT = sqrt(1.0 - sinT2); //fernesel equations double rn = (n1*cosI - n2 * cosT) / (n1*cosI + n2 * cosT); double rt = (n2*cosI - n1 * cosT) / (n2*cosI + n2 * cosT); rn *= rn; rt *= rt; double refl = (rn + rt)*0.5; double trans = 1.0 - refl; if (n3 == 1.0) { mask *= obj.color; mask *= dot(r.direction, nl); mask *= 2; } //total internal reflection if (cosT*cosT < 0.0) { r.origin = x + nl * 0.07f; r.direction = r.direction - n * 2 * dot(n, r.direction); mask *= obj.color; mask *= dot(r.direction, nl); mask *= 2; } //refracton else { //r.origin = x + r.direction * 0.07f; r.direction = n3 * r.direction + (n3*cosI - cosT)*n; r.origin = x + r.direction * 0.07f; mask *= obj.color; mask *= dot(r.direction, nl); mask *= 2; } } } return accucolor; } __global__ void render_kernel(float3 *output) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int i = (height - y - 1)*width + x; unsigned int s1 = x; unsigned int s2 = y; Ray cam(make_float3(50, 52, 295.6), normalize(make_float3(0, -0.042612, -1))); float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); float3 cy = 
normalize(cross(cx, cam.direction)) * .5135; float3 r; r = make_float3(0.0f); for (int s = 0; s < samples; s++) { float3 d = cam.direction + cx * ((.25 + x) / width - .5) + cy * ((.25 + y) / height - .5); r = r + radiance(Ray(cam.origin + d * 40, normalize(d)), &s1, &s2)*(1. / samples); } output[i] = make_float3(clamp(r.x, 0.0f, 1.0f), clamp(r.y, 0.0f, 1.0f), clamp(r.z, 0.0f, 1.0f)); } inline float clamp(float x) { return x < 0.0f ? 0.0f : x > 1.0f ? 1.0f : x; } inline int toInt(float x) { return int(pow(clamp(x), 1 / 2.2) * 255 + .5); } int main() { float3* output_h = new float3[width*height]; float3* output_d; hipMalloc(&output_d, width * height * sizeof(float3)); dim3 block(8, 8, 1); dim3 grid(width / block.x, height / block.y, 1); printf("CUDA initialised.\nStart rendering...\n"); render_kernel << < grid, block >> >(output_d); hipMemcpy(output_h, output_d, width * height * sizeof(float3), hipMemcpyDeviceToHost); hipFree(output_d); printf("Done!\n"); FILE *f = fopen("smallptcuda.ppm", "w"); fprintf(f, "P3\n%d %d\n%d\n", width, height, 255); for (int i = 0; i < width*height; i++) fprintf(f, "%d %d %d ", toInt(output_h[i].x), toInt(output_h[i].y), toInt(output_h[i].z)); printf("Saved image to 'smallptcuda.ppm'\n"); delete[] output_h; system("PAUSE"); }
6e6f0eeb63f2e694226cf1652c30cb33aed27377.cu
#include <iostream> #include <cuda_runtime.h> #include <vector_types.h> #include "device_launch_parameters.h" #include "cutil_math.h" #define M_PI 3.14159265359f #define width 512 #define height 384 #define samples 4096 struct Ray { float3 origin; float3 direction; __device__ Ray(float3 o, float3 d) : origin(o), direction(d) {} }; enum Material { Diffuse, Specular, Refraction }; struct Sphere { float radius; float3 position, emission, color; Material material; __device__ float intersect_sphere(const Ray &r) const { float3 op = r.origin - position; float t, epsilon = 0.0001f; float b = dot(op, r.direction); float discriminant = b * b - dot(op, op) + radius * radius; if (discriminant < 0) return 0; else discriminant = sqrtf(discriminant); t = -b - discriminant; if (t > epsilon) return t; else { t = -b + discriminant; if (t > epsilon) return t; else return 0; } } }; // Scene __constant__ Sphere spheres[] = { { 1e5f,{ 1e5f + 1.0f, 40.8f, 81.6f },{ 0.0f, 0.0f, 0.0f },{ 0.75f, 0.25f, 0.25f }, Diffuse }, //Left { 1e5f,{ -1e5f + 99.0f, 40.8f, 81.6f },{ 0.0f, 0.0f, 0.0f },{ .25f, .25f, .75f }, Diffuse }, //Rght { 1e5f,{ 50.0f, 40.8f, 1e5f },{ 0.0f, 0.0f, 0.0f },{ .75f, .75f, .75f }, Diffuse }, //Back { 1e5f,{ 50.0f, 40.8f, -1e5f + 600.0f },{ 0.0f, 0.0f, 0.0f },{ 1.00f, 1.00f, 1.00f }, Diffuse }, //Frnt { 1e5f,{ 50.0f, 1e5f, 81.6f },{ 0.0f, 0.0f, 0.0f },{ .75f, .75f, .75f }, Diffuse }, //Botm { 1e5f,{ 50.0f, -1e5f + 81.6f, 81.6f },{ 0.0f, 0.0f, 0.0f },{ .75f, .75f, .75f }, Diffuse }, //Top { 16.5f,{ 27.0f, 16.5f, 47.0f },{ 0.0f, 0.0f, 0.0f },{ 1.0f, 1.0f, 1.0f }, Specular}, // small sphere 1 { 16.5f,{ 73.0f, 16.5f, 78.0f },{ 0.0f, 0.0f, 0.0f },{ 1.0f, 1.0f, 1.0f }, Refraction }, // small sphere 2 //{ 16.5f,{ 50.0f, 16.5f, 90.0f}, { 0.0f, 0.0f, 0.0f },{ 1.0f, 1.0f, 1.0f }, Refraction }, //small sphere 3 { 600.0f,{ 50.0f, 681.6f - .77f, 81.6f },{ 2.0f, 1.8f, 1.6f },{ 0.0f, 0.0f, 0.0f }, Diffuse } // Light }; __device__ inline bool intersect_scene(const Ray &r, float &t, int 
&id) { float n = sizeof(spheres) / sizeof(Sphere), d, inf = t = 1e20; for (int i = int(n); i--;) if ((d = spheres[i].intersect_sphere(r)) && d < t) { t = d; id = i; } return t < inf; } __device__ static float getrandom(unsigned int *seed0, unsigned int *seed1) { *seed0 = 36969 * ((*seed0) & 65535) + ((*seed0) >> 16); *seed1 = 18000 * ((*seed1) & 65535) + ((*seed1) >> 16); unsigned int ires = ((*seed0) << 16) + (*seed1); union { float f; unsigned int ui; } res; res.ui = (ires & 0x007fffff) | 0x40000000; return (res.f - 2.f) / 2.f; } __device__ float3 radiance(Ray &r, unsigned int *s1, unsigned int *s2) { float3 accucolor = make_float3(0.0f, 0.0f, 0.0f); float3 mask = make_float3(1.0f, 1.0f, 1.0f); // ray bounce loop no recursionin device for (int bounces = 0; bounces < 8; bounces++) { float t; int id = 0; //miss if (!intersect_scene(r, t, id)) return make_float3(0.0f, 0.0f, 0.0f); const Sphere &obj = spheres[id]; float3 x = r.origin + r.direction*t; float3 n = normalize(x - obj.position); float3 nl = dot(n, r.direction) < 0 ? n : n * -1; //emissive accucolor += mask * obj.emission; //diffuse if (obj.material == Diffuse) { float r1 = 2 * M_PI * getrandom(s1, s2); float r2 = getrandom(s1, s2); float r2s = sqrtf(r2); float3 w = nl; float3 u = normalize(cross((fabs(w.x) > .1 ? 
make_float3(0, 1, 0) : make_float3(1, 0, 0)), w)); float3 v = cross(w, u); float3 d = normalize(u*cos(r1)*r2s + v * sin(r1)*r2s + w * sqrtf(1 - r2)); r.origin = x + nl * 0.05f; //offset for self intersection r.direction = d; mask *= obj.color; mask *= dot(d, nl); // weigh light contribution using cosine of angle between incident light and normal mask *= 2; // fudge factor } //specular else if (obj.material == Specular) { //r.origin = x + nl * 0.07f; r.direction = r.direction - n * 2 * dot(n, r.direction); r.origin = x + r.direction * 0.07f; mask *= obj.color; mask *= dot(r.direction, nl); mask *= 2; } //refraction else { double n1, n2, n3; double cosI = dot(n, r.direction); if (cosI > 0.0) { n1 = 1.5; n2 = 1.0; n = -n; } else { n1 = 1.0; n2 = 1.5; cosI = -cosI; } n3 = n1 / n2; double sinT2 = n3 * n3*(1.0 - cosI * cosI); double cosT = sqrt(1.0 - sinT2); //fernesel equations double rn = (n1*cosI - n2 * cosT) / (n1*cosI + n2 * cosT); double rt = (n2*cosI - n1 * cosT) / (n2*cosI + n2 * cosT); rn *= rn; rt *= rt; double refl = (rn + rt)*0.5; double trans = 1.0 - refl; if (n3 == 1.0) { mask *= obj.color; mask *= dot(r.direction, nl); mask *= 2; } //total internal reflection if (cosT*cosT < 0.0) { r.origin = x + nl * 0.07f; r.direction = r.direction - n * 2 * dot(n, r.direction); mask *= obj.color; mask *= dot(r.direction, nl); mask *= 2; } //refracton else { //r.origin = x + r.direction * 0.07f; r.direction = n3 * r.direction + (n3*cosI - cosT)*n; r.origin = x + r.direction * 0.07f; mask *= obj.color; mask *= dot(r.direction, nl); mask *= 2; } } } return accucolor; } __global__ void render_kernel(float3 *output) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; unsigned int i = (height - y - 1)*width + x; unsigned int s1 = x; unsigned int s2 = y; Ray cam(make_float3(50, 52, 295.6), normalize(make_float3(0, -0.042612, -1))); float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); float3 cy = 
normalize(cross(cx, cam.direction)) * .5135; float3 r; r = make_float3(0.0f); for (int s = 0; s < samples; s++) { float3 d = cam.direction + cx * ((.25 + x) / width - .5) + cy * ((.25 + y) / height - .5); r = r + radiance(Ray(cam.origin + d * 40, normalize(d)), &s1, &s2)*(1. / samples); } output[i] = make_float3(clamp(r.x, 0.0f, 1.0f), clamp(r.y, 0.0f, 1.0f), clamp(r.z, 0.0f, 1.0f)); } inline float clamp(float x) { return x < 0.0f ? 0.0f : x > 1.0f ? 1.0f : x; } inline int toInt(float x) { return int(pow(clamp(x), 1 / 2.2) * 255 + .5); } int main() { float3* output_h = new float3[width*height]; float3* output_d; cudaMalloc(&output_d, width * height * sizeof(float3)); dim3 block(8, 8, 1); dim3 grid(width / block.x, height / block.y, 1); printf("CUDA initialised.\nStart rendering...\n"); render_kernel << < grid, block >> >(output_d); cudaMemcpy(output_h, output_d, width * height * sizeof(float3), cudaMemcpyDeviceToHost); cudaFree(output_d); printf("Done!\n"); FILE *f = fopen("smallptcuda.ppm", "w"); fprintf(f, "P3\n%d %d\n%d\n", width, height, 255); for (int i = 0; i < width*height; i++) fprintf(f, "%d %d %d ", toInt(output_h[i].x), toInt(output_h[i].y), toInt(output_h[i].z)); printf("Saved image to 'smallptcuda.ppm'\n"); delete[] output_h; system("PAUSE"); }
56898aa4cd96df565579a5d7261d6dd491499f5c.hip
// !!! This is a file automatically generated by hipify!!! /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved. * Third party copyrights are property of their respective owners. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id: $ * Ported to PCL by Koen Buys : Attention Work in progress! 
*/ #include <vector> #include <hip/hip_runtime.h> #include "NPP_staging.hpp" texture<Ncv8u, 1, hipReadModeElementType> tex8u; texture<Ncv32u, 1, hipReadModeElementType> tex32u; texture<uint2, 1, hipReadModeElementType> tex64u; //============================================================================== // // CUDA streams handling // //============================================================================== static hipStream_t nppStream = 0; hipStream_t nppStGetActiveCUDAstream(void) { return nppStream; } hipStream_t nppStSetActiveCUDAstream(hipStream_t cudaStream) { hipStream_t tmp = nppStream; nppStream = cudaStream; return tmp; } //============================================================================== // // BlockScan.cuh // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 template <class T> inline __device__ T warpScanInclusive(T idata, volatile T *s_Data) { Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; //for(Ncv32u offset = 1; offset < K_WARP_SIZE; offset <<= 1) //{ // s_Data[pos] += s_Data[pos - offset]; //} s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; } template <class T> inline __device__ T warpScanExclusive(T idata, volatile T *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <class T, Ncv32u tiNumScanThreads> inline __device__ T blockScanInclusive(T idata, volatile T *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan T warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for exclusive 
warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements T val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // IntegralImage.cu // //============================================================================== const Ncv32u NUM_SCAN_THREADS = 256; const Ncv32u LOG2_NUM_SCAN_THREADS = 8; template<class T_in, class T_out> struct _scanElemOp { template<bool tbDoSqr> static inline __host__ __device__ T_out scanElemOp(T_in elem) { return scanElemOp( elem, Int2Type<(int)tbDoSqr>() ); } private: template <int v> struct Int2Type { enum { value = v }; }; static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>) { return (T_out)elem; } static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>) { return (T_out)(elem*elem); } }; template<class T> inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs); template<> inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs); } template<> inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } template<> inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u 
curElemOffs) { return d_src[curElemOffs]; } /** * \brief Segmented scan kernel * * Calculates per-row prefix scans of the input image. * Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * \tparam T_op Defines an operation to be performed on the input image pixels * * \param d_src [IN] Source image pointer * \param srcWidth [IN] Source image width * \param srcStride [IN] Source image stride * \param d_II [OUT] Output image pointer * \param IIstride [IN] Output image stride * * \return None */ template <class T_in, class T_out, bool tbDoSqr> __global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride, T_out *d_II, Ncv32u IIstride) { //advance pointers to the current line if (sizeof(T_in) != 1) { d_src += srcStride * blockIdx.x; } //for initial image 8bit source we use texref tex8u d_II += IIstride * blockIdx.x; Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS; Ncv32u offsetX = 0; __shared__ T_out shmem[NUM_SCAN_THREADS * 2]; __shared__ T_out carryElem; carryElem = 0; __syncthreads(); while (numBuckets--) { Ncv32u curElemOffs = offsetX + threadIdx.x; T_out curScanElem; T_in curElem; T_out curElemMod; if (curElemOffs < srcWidth) { //load elements curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs); } curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem); //inclusive scan curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, shmem); if (curElemOffs <= srcWidth) { //make scan exclusive and write the bucket to the output buffer d_II[curElemOffs] = carryElem + curScanElem - curElemMod; offsetX += NUM_SCAN_THREADS; } //remember last element for subsequent buckets adjustment __syncthreads(); if (threadIdx.x == NUM_SCAN_THREADS-1) { carryElem += curScanElem; } __syncthreads(); } if (offsetX == srcWidth && !threadIdx.x) { d_II[offsetX] = carryElem; } } template <bool 
tbDoSqr, class T_in, class T_out> NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride, T_out *d_dst, Ncv32u dstStride, NcvSize32u roi) { hipChannelFormatDesc cfdTex; size_t alignmentOffset = 0; if (sizeof(T_in) == 1) { cfdTex = hipCreateChannelDesc<Ncv8u>(); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); if (alignmentOffset > 0) { ncvAssertCUDAReturn(hipUnbindTexture(tex8u), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); } } hipLaunchKernelGGL(( scanRows <T_in, T_out, tbDoSqr>) , dim3(roi.height), dim3(NUM_SCAN_THREADS), 0, nppStGetActiveCUDAstream(), d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment) { Ncv32u alignMask = allocatorAlignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u dimBytes = dim * elemTypeSize; Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask; Ncv32u PaddedDim = pitch / elemTypeSize; return PaddedDim; } template <class T_in, class T_out> NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep, T_out *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) && dstStep >= (roi.width + 1) * sizeof(T_out) && 
srcStep % sizeof(T_in) == 0 && dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T_in); dstStep /= sizeof(T_out); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <false> (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u), (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false> (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), (Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || 
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width && dstStep >= (roi.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64; Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? 
PaddedHeightII32 : PaddedHeightII64; NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax); ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64); ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <true, Ncv8u, Ncv32u> (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u), Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false, Ncv32u, Ncv64u> (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u), d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width, 
(Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f), (Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, hipDeviceProp_t &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width, NULL, (roiSize.width+1) * sizeof(Ncv64u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv32u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep, Ncv32f *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, hipDeviceProp_t &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv32u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) && dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv32u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv32u top = h_dst[(i-1) * dstStep + j]; Ncv32u left = h_dst[i * dstStep + (j - 1)]; Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep, Ncv32f *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) && dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) && srcStep % sizeof(Ncv32f) == 0 && dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(Ncv32f); dstStep /= sizeof(Ncv32f); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0.0f; for (Ncv32u j=1; j<WidthII; j++) { Ncv32f top = h_dst[(i-1) * dstStep + j]; Ncv32f left = h_dst[i * dstStep + (j - 1)]; Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv64u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv64u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv64u top = h_dst[(i-1) * dstStep + j]; Ncv64u left = h_dst[i * dstStep + (j - 1)]; Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem*elem + left - topleft + top; } } return NPPST_SUCCESS; } 
//============================================================================== // // Decimate.cu // //============================================================================== const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32; const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8; template<class T, NcvBool tbCacheTexture> __device__ T getElem_Decimate(Ncv32u x, T *d_src); template<> __device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src) { return tex1Dfetch(tex32u, x); } template<> __device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src) { return d_src[x]; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src) { return d_src[x]; } template <class T, NcvBool tbCacheTexture> __global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u dstRoi, Ncv32u scale) { int curX = blockIdx.x * blockDim.x + threadIdx.x; int curY = blockIdx.y * blockDim.y + threadIdx.y; if (curX >= dstRoi.width || curY >= dstRoi.height) { return; } d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src); } template <class T> static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; dim3 
grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X, (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y); dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y); if (!readThruTexture) { hipLaunchKernelGGL(( decimate_C1R <T, false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStep, d_dst, dstStep, dstRoi, scale); } else { hipChannelFormatDesc cfdTexSrc; if (sizeof(T) == sizeof(Ncv32u)) { cfdTexSrc = hipCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } else { cfdTexSrc = hipCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } hipLaunchKernelGGL(( decimate_C1R <T, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStep, d_dst, dstStep, dstRoi, scale); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep, T *h_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) && srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / 
scale; for (Ncv32u i=0; i<dstRoi.height; i++) { for (Ncv32u j=0; j<dstRoi.width; j++) { h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale]; } } return NPPST_SUCCESS; } #define implementNppDecimate(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \ { \ return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, \ srcRoi, scale, readThruTexture); \ } #define implementNppDecimateHost(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale) \ { \ return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, \ srcRoi, scale); \ } implementNppDecimate(32, u) implementNppDecimate(32, s) implementNppDecimate(32, f) implementNppDecimate(64, u) implementNppDecimate(64, s) implementNppDecimate(64, f) implementNppDecimateHost(32, u) implementNppDecimateHost(32, s) implementNppDecimateHost(32, f) implementNppDecimateHost(64, u) implementNppDecimateHost(64, s) implementNppDecimateHost(64, f) //============================================================================== // // RectStdDev.cu // //============================================================================== const Ncv32u NUM_RECTSTDDEV_THREADS = 128; template <NcvBool tbCacheTexture> __device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum) { if (tbCacheTexture) { return tex1Dfetch(tex32u, x); } else { return d_sum[x]; } } template <NcvBool tbCacheTexture> __device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum) { if (tbCacheTexture) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } else { return d_sqsum[x]; } } template <NcvBool tbCacheTexture> __global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u 
*d_sqsum, Ncv32u sqsumStep,
                                   Ncv32f *d_norm, Ncv32u normStep,
                                   NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea)
{
    // One thread per output pixel along x; blockIdx.y selects the ROI row.
    Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x;
    if (x_offs >= roi.width)
    {
        return;
    }

    Ncv32u sum_offset = blockIdx.y * sumStep + x_offs;
    Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs;

    // Sum of pixel values inside `rect` via the 4-corner integral-image trick.
    //OPT: try swapping order (could change cache hit/miss ratio)
    Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum);
    Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum);
    Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum);
    Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum);
    Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl;

    // Same 4-corner access on the 64-bit squared-sum integral image.
    Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br;
    sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum);
    sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum);
    sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum);
    sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum);
    Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl;

    Ncv32f mean = sum_val * invRectArea;

    //////////////////////////////////////////////////////////////////////////
    // sqsum_val_res = sqsum_val / rectArea
    // The 64-bit sqsum is split into a float-representable high part
    // (truncated) and the residual; both are scaled by invRectArea and
    // re-added to retain precision a single float conversion would lose.
    //////////////////////////////////////////////////////////////////////////
    Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val);
    Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1);
    Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2;
    Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3);
    sqsum_val_1 *= invRectArea;
    sqsum_val_4 *= invRectArea;
    Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4;

    //////////////////////////////////////////////////////////////////////////
    // variance = sqsum_val_res - mean * mean
    //////////////////////////////////////////////////////////////////////////

#if defined DISABLE_MAD_SELECTIVELY
    // BUGFIX: this branch previously used sqsum_val_2 — the *unscaled* 64-bit
    // intermediate — instead of the area-normalized sqsum_val_res, yielding a
    // wildly wrong variance whenever DISABLE_MAD_SELECTIVELY is defined.
    Ncv32f variance = sqsum_val_res - __fmul_rn(mean, mean);
#else
    Ncv32f variance = sqsum_val_res - mean * mean;
#endif

    //////////////////////////////////////////////////////////////////////////
    // stddev = sqrtf(variance)
    //////////////////////////////////////////////////////////////////////////

    //Ncv32f stddev = sqrtf(variance);
    Ncv32f stddev = __fsqrt_rn(variance);

    d_norm[blockIdx.y * normStep + x_offs] = stddev;
}


/**
 * Per-pixel standard deviation over a sliding rectangle, computed from the
 * integral image (d_sum) and squared integral image (d_sqsum).
 * Steps are in bytes and must cover the ROI extended by the rectangle;
 * scaleArea compensates for scaled-down integral images (>= 1.0f).
 */
NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep,
                                   Ncv64u *d_sqsum, Ncv32u sqsumStep,
                                   Ncv32f *d_norm, Ncv32u normStep,
                                   NcvSize32u roi, NcvRect32u rect,
                                   Ncv32f scaleArea, NcvBool readThruTexture)
{
    ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI);
    ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) &&
                    sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) &&
                    normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) &&
                    sumStep % sizeof(Ncv32u) == 0 &&
                    sqsumStep % sizeof(Ncv64u) == 0 &&
                    normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP);
    ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE);

    // Convert byte pitches to element pitches.
    sumStep /= sizeof(Ncv32u);
    sqsumStep /= sizeof(Ncv64u);
    normStep /= sizeof(Ncv32f);

    Ncv32f rectArea = rect.width * rect.height * scaleArea;
    Ncv32f invRectArea = 1.0f / rectArea;

    dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height);
    dim3 block(NUM_RECTSTDDEV_THREADS);

    if (!readThruTexture)
    {
        hipLaunchKernelGGL(( rectStdDev_32f_C1R <false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(),
            d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep,
            roi, rect, invRectArea);
    }
    else
    {
        hipChannelFormatDesc cfdTexSrc;
        hipChannelFormatDesc
cfdTexSqr; cfdTexSrc = hipCreateChannelDesc<Ncv32u>(); cfdTexSqr = hipCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); hipLaunchKernelGGL(( rectStdDev_32f_C1R <true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep, Ncv64u *h_sqsum, Ncv32u sqsumStep, Ncv32f *h_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea) { ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; for (Ncv32u i=0; i<roi.height; i++) { for (Ncv32u j=0; j<roi.width; j++) { Ncv32u sum_offset = i * sumStep + j; Ncv32u sqsum_offset = i * sqsumStep + j; Ncv32u sum_tl = h_sum[sum_offset + rect.y * 
sumStep + rect.x]; Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x]; Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + rect.x + rect.width]; Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width]; Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x]; Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x]; Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width]; Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width]; Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl); Ncv64f mean = sum_val * invRectArea; Ncv64f sqsum_val_2 = sqsum_val / rectArea; Ncv64f variance = sqsum_val_2 - mean * mean; h_norm[i * normStep + j] = (Ncv32f)sqrt(variance); } } return NPPST_SUCCESS; } //============================================================================== // // Transpose.cu // //============================================================================== const Ncv32u TRANSPOSE_TILE_DIM = 16; const Ncv32u TRANSPOSE_BLOCK_ROWS = 16; /** * \brief Matrix transpose kernel * * Calculates transpose of the input image * \see TRANSPOSE_TILE_DIM * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * * \param d_src [IN] Source image pointer * \param srcStride [IN] Source image stride * \param d_dst [OUT] Output image pointer * \param dstStride [IN] Output image stride * * \return None */ template <class T> __global__ void transpose(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1]; Ncv32u blockIdx_x, blockIdx_y; // do diagonal reordering if (gridDim.x == gridDim.y) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x; } else { Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y; 
blockIdx_y = bid % gridDim.y; blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x; } Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x; Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y; Ncv32u index_gmem = xIndex + yIndex * srcStride; if (xIndex < srcRoi.width) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.height) { tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride]; } } } __syncthreads(); xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y; index_gmem = xIndex + yIndex * dstStride; if (xIndex < srcRoi.height) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.width) { d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i]; } } } } template <class T> NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM); dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM); hipLaunchKernelGGL(( transpose <T>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcStride, d_dst, dstStride, srcRoi); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride, T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width 
> 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); for (Ncv32u i=0; i<srcRoi.height; i++) { for (Ncv32u j=0; j<srcRoi.width; j++) { h_dst[j*dstStride+i] = h_src[i*srcStride + j]; } } return NPPST_SUCCESS; } #define implementNppTranspose(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \ { \ return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, srcRoi); \ } #define implementNppTransposeHost(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi) \ { \ return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, srcRoi); \ } implementNppTranspose(32,u) implementNppTranspose(32,s) implementNppTranspose(32,f) implementNppTranspose(64,u) implementNppTranspose(64,s) implementNppTranspose(64,f) implementNppTransposeHost(32,u) implementNppTransposeHost(32,s) implementNppTransposeHost(32,f) implementNppTransposeHost(64,u) implementNppTransposeHost(64,s) implementNppTransposeHost(64,f) NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } //============================================================================== // // Compact.cu // 
//============================================================================== const Ncv32u NUM_REMOVE_THREADS = 256; template <bool bRemove, bool bWritePartial> __global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_blockSums, Ncv32u elemRemove) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn > srcLen + blockDim.x) { return; } __shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2]; Ncv32u scanElem = 0; if (elemAddrIn < srcLen) { if (bRemove) { scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0; } else { scanElem = d_src[elemAddrIn]; } } Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem); __syncthreads(); if (elemAddrIn < srcLen) { if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial) { d_blockSums[blockId] = localScanInc; } if (bRemove) { d_offsets[elemAddrIn] = localScanInc - scanElem; } else { d_src[elemAddrIn] = localScanInc - scanElem; } } } __global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } __shared__ Ncv32u valOffs; valOffs = d_blockSums[blockId]; __syncthreads(); d_offsets[elemAddrIn] += valOffs; } __global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_dst, Ncv32u elemRemove, Ncv32u *dstLenValue) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } Ncv32u elem = d_src[elemAddrIn]; Ncv32u elemAddrOut = d_offsets[elemAddrIn]; if (elem != elemRemove) { d_dst[elemAddrOut] = elem; } if (elemAddrIn == srcLen-1) { if (elem != elemRemove) { *dstLenValue = elemAddrOut + 1; } else { *dstLenValue = elemAddrOut; } } } NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen, Ncv32u 
*d_dst, Ncv32u *dstLenPinned, Ncv32u elemRemove, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLenPinned != NULL) { *dstLenPinned = 0; } return NPPST_SUCCESS; } std::vector<Ncv32u> partSumNums; std::vector<Ncv32u> partSumOffsets; Ncv32u partSumLastNum = srcLen; Ncv32u partSumLastOffs = 0; do { partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u), gpuAllocator.alignment()) / sizeof(Ncv32u); partSumLastOffs += curPartSumAlignedLength; partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS; } while (partSumLastNum>1); partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1); ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1); ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN dim3 block(NUM_REMOVE_THREADS); //calculate zero-level partial sums for indices calculation if (partSumNums.size() > 2) { dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( removePass1Scan <true, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), d_hierSums.ptr() + partSumOffsets[1], elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //calculate hierarchical partial sums for (Ncv32u i=1; i<partSumNums.size()-1; i++) { dim3 grid(partSumNums[i+1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } if 
(grid.x != 1) { hipLaunchKernelGGL(( removePass1Scan <false, true>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, d_hierSums.ptr() + partSumOffsets[i+1], 0); } else { hipLaunchKernelGGL(( removePass1Scan <false, false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, NULL, 0); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //adjust hierarchical partial sums for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--) { dim3 grid(partSumNums[i+1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( removePass2Adjust) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], d_hierSums.ptr() + partSumOffsets[i+1]); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } } else { dim3 grid(partSumNums[1]); hipLaunchKernelGGL(( removePass1Scan <true, false>) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), NULL, elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //compact source vector using indices dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( removePass3Compact) , dim3(grid), dim3(block), 0, nppStGetActiveCUDAstream(), d_src, srcLen, d_hierSums.ptr(), d_dst, elemRemove, d_numDstElements.ptr()); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //get number of dst elements if (dstLenPinned != NULL) { ncvAssertCUDAReturn(hipMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u), hipMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); } NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus 
nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR);

    // An empty input vector needs no scratch memory at all.
    if (srcLen == 0)
    {
        *pBufsize = 0;
        return NPPST_SUCCESS;
    }

    // Dry-run the compaction with a counting allocator: nothing is allocated
    // on the device, the allocator merely records the peak requested size.
    NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment));
    ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus status = compactVector_32u_device(NULL, srcLen, NULL, NULL,
                                                0xC001C0DE, gpuCounter);
    ncvAssertReturnNcvStat(status);

    *pBufsize = (Ncv32u)gpuCounter.maxSize();
    return NPPST_SUCCESS;
}


// Signed and float variants share the unsigned implementation: only the bit
// pattern matters for buffer-size accounting.
NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}


NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, hipDeviceProp_t &devProp)
{
    return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp);
}


/**
 * Removes every occurrence of `elemRemove` from d_src, writing survivors
 * densely into d_dst and (optionally) the kept count into p_dstLen.
 * pBuffer/bufSize is device scratch sized by nppsStCompactGetSize_32u.
 */
NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen,
                            Ncv32u *d_dst, Ncv32u *p_dstLen,
                            Ncv32u elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, hipDeviceProp_t &devProp)
{
    NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize,
                                      static_cast<Ncv32u>(devProp.textureAlignment), pBuffer);
    ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR);

    NCVStatus status = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen,
                                                elemRemove, gpuAllocator);
    ncvAssertReturnNcvStat(status);

    return NPPST_SUCCESS;
}


// Signed/float variants reinterpret the element bits as Ncv32u and reuse the
// unsigned path (equality against elemRemove is bitwise-exact).
NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen,
                            Ncv32s *d_dst, Ncv32u *p_dstLen,
                            Ncv32s elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, hipDeviceProp_t &devProp)
{
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen,
                             (Ncv32u *)d_dst, p_dstLen,
                             *(Ncv32u *)&elemRemove,
                             pBuffer, bufSize, devProp);
}


NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen,
                            Ncv32f *d_dst, Ncv32u *p_dstLen,
                            Ncv32f elemRemove, Ncv8u *pBuffer,
                            Ncv32u bufSize, hipDeviceProp_t &devProp)
{
    return nppsStCompact_32u((Ncv32u *)d_src, srcLen,
                             (Ncv32u *)d_dst, p_dstLen,
                             *(Ncv32u *)&elemRemove,
                             pBuffer, bufSize, devProp);
}
/**
 * Host-side stream compaction: copies every element of h_src that differs
 * from elemRemove into h_dst, preserving order. dstLen (optional) receives
 * the number of elements kept.
 */
NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen,
                                 Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove)
{
    ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR);

    if (srcLen == 0)
    {
        if (dstLen != NULL)
        {
            *dstLen = 0;
        }
        return NPPST_SUCCESS;
    }

    Ncv32u kept = 0;
    for (Ncv32u i = 0; i < srcLen; i++)
    {
        if (h_src[i] != elemRemove)
        {
            h_dst[kept++] = h_src[i];
        }
    }

    if (dstLen != NULL)
    {
        *dstLen = kept;
    }

    return NPPST_SUCCESS;
}


// Signed/float host variants reuse the unsigned path via bit reinterpretation.
NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen,
                                 Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove)
{
    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst,
                                  dstLen, *(Ncv32u *)&elemRemove);
}


NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen,
                                 Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove)
{
    return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst,
                                  dstLen, *(Ncv32u *)&elemRemove);
}


//==============================================================================
//
// Filter.cu
//
//==============================================================================


texture <float, 1, hipReadModeElementType> texSrc;
texture <float, 1, hipReadModeElementType> texKernel;


// Fetch row element i through texSrc, mirror-reflecting indices that fall
// outside [0, w).
__forceinline__ __device__ float getValueMirrorRow(const int rowOffset,
                                                   int i,
                                                   int w)
{
    if (i < 0) i = 1 - i;
    if (i >= w) i = w + w - i - 1;
    return tex1Dfetch (texSrc, rowOffset + i);
}


// Fetch column element j through texSrc, mirror-reflecting indices that fall
// outside [0, h).
__forceinline__ __device__ float getValueMirrorColumn(const int offset,
                                                      const int rowStep,
                                                      int j,
                                                      int h)
{
    if (j < 0) j = 1 - j;
    if (j >= h) j = h + h - j - 1;
    return tex1Dfetch (texSrc, offset + j * rowStep);
}


// Separable row filter with mirrored borders; source pixels and filter taps
// are read through texSrc/texKernel, one thread per output pixel.
__global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep,
                                              Ncv32f *pDst,
                                              NcvSize32u dstSize,
                                              Ncv32u dstStep,
                                              NcvRect32u roi,
                                              Ncv32s nKernelSize,
                                              Ncv32s nAnchor,
                                              Ncv32f multiplier)
{
    // position within ROI
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iy = blockDim.y * blockIdx.y + threadIdx.y;

    if (ix >= roi.width || iy >= roi.height)
    {
        return;
    }

    const int
p = nKernelSize - nAnchor - 1; const int j = roi.y + iy; const int rowOffset = j * srcStep + roi.x; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width) * tex1Dfetch (texKernel, m); } pDst[iy * dstStep + ix] = sum * multiplier; } __global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int i = roi.x + ix; const int offset = i + roi.y * srcStep; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height) * tex1Dfetch (texKernel, m); } pDst[ix + iy * dstStep] = sum * multiplier; } NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } hipChannelFormatDesc 
floatChannel = hipCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderNone: return NPPST_ERROR; case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: hipLaunchKernelGGL(( FilterRowBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } hipChannelFormatDesc floatChannel = hipCreateChannelDesc <float> (); texSrc.normalized = false; 
texKernel.normalized = false; hipBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); hipBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: hipLaunchKernelGGL(( FilterColumnBorderMirror_32f_C1R) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } //============================================================================== // // FrameInterpolate.cu // //============================================================================== inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom) { return (num + denom - 1)/denom; } texture<float, 2, hipReadModeElementType> tex_src1; texture<float, 2, hipReadModeElementType> tex_src0; __global__ void BlendFramesKernel(const float *u, const float *v, // forward flow const float *ur, const float *vr, // backward flow const float *o0, const float *o1, // coverage masks int w, int h, int s, float theta, float *out) { const int ix = threadIdx.x + blockDim.x * blockIdx.x; const int iy = threadIdx.y + blockDim.y * blockIdx.y; const int pos = ix + s * iy; if (ix >= w || iy >= h) return; float _u = u[pos]; float _v = v[pos]; float _ur = ur[pos]; float _vr = vr[pos]; float x = (float)ix + 0.5f; float y = (float)iy + 0.5f; bool b0 = o0[pos] > 1e-4f; bool b1 = o1[pos] > 1e-4f; if (b0 && b1) { // pixel is visible on both frames out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) + tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta; } else if (b0) { // visible on the first frame only out[pos] 
= tex2D(tex_src0, x - _u * theta, y - _v * theta);
    }
    else
    {
        // visible on the second frame only
        out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta));
    }
}


/**
 * Blends two frames into an intermediate frame at time `theta`, using the
 * forward (ufi/vfi) and backward (ubi/vbi) flow fields plus per-pixel
 * coverage masks o1/o2. Source frames are sampled via bilinear 2D textures.
 */
NCVStatus BlendFrames(const Ncv32f *src0,
                      const Ncv32f *src1,
                      const Ncv32f *ufi,
                      const Ncv32f *vfi,
                      const Ncv32f *ubi,
                      const Ncv32f *vbi,
                      const Ncv32f *o1,
                      const Ncv32f *o2,
                      Ncv32u width,
                      Ncv32u height,
                      Ncv32u stride,
                      Ncv32f theta,
                      Ncv32f *out)
{
    // Clamped, bilinear, unnormalized texture access for both source frames.
    tex_src1.addressMode[0] = hipAddressModeClamp;
    tex_src1.addressMode[1] = hipAddressModeClamp;
    tex_src1.filterMode = hipFilterModeLinear;
    tex_src1.normalized = false;

    tex_src0.addressMode[0] = hipAddressModeClamp;
    tex_src0.addressMode[1] = hipAddressModeClamp;
    tex_src0.filterMode = hipFilterModeLinear;
    tex_src0.normalized = false;

    hipChannelFormatDesc desc = hipCreateChannelDesc <float> ();
    const Ncv32u pitch = stride * sizeof (float);
    ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src1, src1, desc, width, height, pitch),
        NPPST_TEXTURE_BIND_ERROR);
    ncvAssertCUDAReturn (hipBindTexture2D (0, tex_src0, src0, desc, width, height, pitch),
        NPPST_TEXTURE_BIND_ERROR);

    dim3 threads (32, 4);
    dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y));

    hipLaunchKernelGGL(( BlendFramesKernel), dim3(blocks), dim3(threads), 0, nppStGetActiveCUDAstream (),
        ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out);

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


// Frame interpolation needs the same scratch buffer as vector warping.
NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize,
                                           Ncv32u nStep,
                                           Ncv32u *hpSize)
{
    NCVStatus status = NPPST_ERROR;
    status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize);
    return status;
}


/**
 * Interpolates a frame at time pState->pos between pSrcFrame0 and pSrcFrame1:
 * warps the forward and backward flow fields to the intermediate time, then
 * blends the two source frames using the warped flows and coverage masks.
 */
NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState)
{
    // check state validity
    ncvAssertReturn (pState->pSrcFrame0 != 0 &&
        pState->pSrcFrame1 != 0 &&
        pState->pFU != 0 &&
        pState->pFV != 0 &&
        pState->pBU != 0 &&
        pState->pBV != 0 &&
        pState->pNewFrame != 0 &&
        pState->ppBuffers[0] != 0 &&
        pState->ppBuffers[1] != 0 &&
        pState->ppBuffers[2] != 0 &&
        pState->ppBuffers[3] != 0 &&
        pState->ppBuffers[4] != 0 &&
        pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR);

    ncvAssertReturn (pState->size.width > 0 && pState->size.height > 0, NPPST_ERROR);

    ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) &&
        pState->nStep > 0 &&
        pState->nStep % sizeof (Ncv32f) == 0,
        NPPST_INVALID_STEP);

    // change notation
    Ncv32f *cov0 = pState->ppBuffers[0];
    Ncv32f *cov1 = pState->ppBuffers[1];
    Ncv32f *fwdU = pState->ppBuffers[2]; // forward u
    Ncv32f *fwdV = pState->ppBuffers[3]; // forward v
    Ncv32f *bwdU = pState->ppBuffers[4]; // backward u
    Ncv32f *bwdV = pState->ppBuffers[5]; // backward v

    // warp forward flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU,
            pState->size, pState->nStep,
            pState->pFU, pState->pFV, pState->nStep,
            cov0, pState->pos, fwdU) );

    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV,
            pState->size, pState->nStep,
            pState->pFU, pState->pFV, pState->nStep,
            cov0, pState->pos, fwdV) );

    // warp backward flow
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU,
            pState->size, pState->nStep,
            pState->pBU, pState->pBV, pState->nStep,
            cov1, 1.0f - pState->pos, bwdU) );

    // BUGFIX: the warped backward V field was previously written into bwdU,
    // clobbering it and leaving bwdV uninitialized for the blend below.
    ncvAssertReturnNcvStat (
        nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV,
            pState->size, pState->nStep,
            pState->pBU, pState->pBV, pState->nStep,
            cov1, 1.0f - pState->pos, bwdV) );

    // interpolate frame
    ncvAssertReturnNcvStat (
        BlendFrames (pState->pSrcFrame0,
            pState->pSrcFrame1,
            fwdU, fwdV, bwdU, bwdV,
            cov0, cov1,
            pState->size.width,
            pState->size.height,
            pState->nStep / sizeof (Ncv32f),
            pState->pos,
            pState->pNewFrame) );

    return NPPST_SUCCESS;
}


//==============================================================================
//
// VectorWarpFrame.cu
//
//==============================================================================


__global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v,
                                         const float *src,
                                         const int w, const int h, const int flow_stride, const
int image_stride,
                                         const float time_scale,
                                         float *normalization_factor,
                                         float *dst)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    int i = threadIdx.y + blockDim.y * blockIdx.y;

    if (i >= h || j >= w) return;

    int flow_row_offset = i * flow_stride;
    int image_row_offset = i * image_stride;

    //bottom left corner of a target pixel
    float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f;
    float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f;

    // pixel containing bottom left corner
    float px;
    float py;
    float dx = modff (cx, &px);
    float dy = modff (cy, &py);

    // target pixel integer coords
    int tx;
    int ty;
    tx = (int) px;
    ty = (int) py;

    float value = src[image_row_offset + j];
    float weight;

    // Splat `value` bilinearly into the four pixels covered by the 2x2 PSF,
    // accumulating the weights in normalization_factor for later division.

    // fill pixel containing bottom right corner
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * dy;
        atomicAdd (dst + ty * image_stride + tx, value * weight);
        atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }

    // fill pixel containing bottom left corner
    tx -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * dy;
        atomicAdd (dst + ty * image_stride + tx, value * weight);
        atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }

    // fill pixel containing upper left corner
    ty -= 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = (1.0f - dx) * (1.0f - dy);
        atomicAdd (dst + ty * image_stride + tx, value * weight);
        atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }

    // fill pixel containing upper right corner
    tx += 1;
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        weight = dx * (1.0f - dy);
        atomicAdd (dst + ty * image_stride + tx, value * weight);
        atomicAdd (normalization_factor + ty * image_stride + tx, weight);
    }
}


// 1x1 point-spread-function forward warp: each source pixel is splatted into
// the single nearest target pixel, so no weight normalization is required.
__global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v,
                                         const float *src,
                                         const int w, const int h,
                                         const int flow_stride, const int image_stride,
                                         const float time_scale,
                                         float *dst)
{
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    int i = threadIdx.y + blockDim.y * blockIdx.y;

    if (i >= h || j >= w) return;

    int flow_row_offset = i * flow_stride;
    int image_row_offset = i * image_stride;

    float u_ = u[flow_row_offset + j];
    float v_ = v[flow_row_offset + j];

    //bottom left corner of target pixel
    float cx = u_ * time_scale + (float)j + 1.0f;
    float cy = v_ * time_scale + (float)i + 1.0f;

    // pixel containing bottom left corner
    int tx = __float2int_rn (cx);
    int ty = __float2int_rn (cy);

    float value = src[image_row_offset + j];

    // fill pixel
    if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0)))
    {
        atomicAdd (dst + ty * image_stride + tx, value);
    }
}


// Divides each pixel by its accumulated splat weight (1.0 where weight == 0).
__global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image)
{
    int i = threadIdx.y + blockDim.y * blockIdx.y;
    int j = threadIdx.x + blockDim.x * blockIdx.x;

    if (i >= h || j >= w) return;

    const int pos = i * s + j;

    float scale = normalization_factor[pos];

    float invScale = (scale == 0.0f) ? 1.0f : (1.0f / scale);

    image[pos] *= invScale;
}


// Fills a densely packed w x h float image with `value`.
__global__ void MemsetKernel(const float value, int w, int h, float *image)
{
    int i = threadIdx.y + blockDim.y * blockIdx.y;
    int j = threadIdx.x + blockDim.x * blockIdx.x;

    if (i >= h || j >= w) return;

    const int pos = i * w + j;

    image[pos] = value;
}


// The scratch buffer must hold one float per pitched source element.
NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize,
                                         Ncv32u nSrcStep,
                                         Ncv32u *hpSize)
{
    ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR);
    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep,
        NPPST_INVALID_STEP);

    *hpSize = nSrcStep * srcSize.height;

    return NPPST_SUCCESS;
}


// does not require normalization
NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc,
                                         NcvSize32u srcSize,
                                         Ncv32u nSrcStep,
                                         const Ncv32f *pU,
                                         const Ncv32f *pV,
                                         Ncv32u nVFStep,
                                         Ncv32f timeScale,
                                         Ncv32f *pDst)
{
    ncvAssertReturn (pSrc != NULL &&
        pU != NULL &&
        pV != NULL &&
        pDst != NULL, NPPST_NULL_POINTER_ERROR);

    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
        srcSize.width * sizeof (Ncv32f) <= nVFStep,
        NPPST_INVALID_STEP);

    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u vfStep = nVFStep / sizeof (Ncv32f);

    dim3 ctaSize (32, 6);
    dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));

    hipLaunchKernelGGL(( ForwardWarpKernel_PSF1x1) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(),
        pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst);

    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


/**
 * Forward warp with a 2x2 PSF: splats into pDst with per-pixel weights kept
 * in pBuffer, then normalizes. pBuffer must be sized per
 * nppiStVectorWarpGetBufferSize (nSrcStep * height bytes).
 */
NCVStatus nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc,
                                         NcvSize32u srcSize,
                                         Ncv32u nSrcStep,
                                         const Ncv32f *pU,
                                         const Ncv32f *pV,
                                         Ncv32u nVFStep,
                                         Ncv32f *pBuffer,
                                         Ncv32f timeScale,
                                         Ncv32f *pDst)
{
    ncvAssertReturn (pSrc != NULL &&
        pU != NULL &&
        pV != NULL &&
        pDst != NULL &&
        pBuffer != NULL, NPPST_NULL_POINTER_ERROR);

    ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep &&
        srcSize.width * sizeof (Ncv32f) <= nVFStep,
        NPPST_INVALID_STEP);

    Ncv32u srcStep = nSrcStep / sizeof (Ncv32f);
    Ncv32u vfStep = nVFStep / sizeof (Ncv32f);

    dim3 ctaSize (32, 6);
    dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));

    // BUGFIX: clear the whole pitched weight buffer (srcStep x height
    // elements, matching nppiStVectorWarpGetBufferSize), not just a densely
    // packed width x height region. The warp and normalization kernels index
    // pBuffer with the pitched stride, so when srcStep > width parts of the
    // buffer they read were previously left uninitialized.
    dim3 memsetGrid (iDivUp (srcStep, ctaSize.x), iDivUp (srcSize.height, ctaSize.y));
    hipLaunchKernelGGL(( MemsetKernel) , dim3(memsetGrid), dim3(ctaSize), 0, nppStGetActiveCUDAstream(),
        0, srcStep, srcSize.height, pBuffer);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    // NOTE(review): pDst is accumulated into with atomicAdd but is not zeroed
    // here — callers appear to rely on its prior contents; confirm upstream.
    hipLaunchKernelGGL(( ForwardWarpKernel_PSF2x2) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(),
        pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    hipLaunchKernelGGL(( NormalizeKernel) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream(),
        pBuffer, srcSize.width, srcSize.height, srcStep, pDst);
    ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR);

    return NPPST_SUCCESS;
}


//==============================================================================
//
// Resize.cu
//
//============================================================================== texture <float, 2, hipReadModeElementType> texSrc2D; __forceinline__ __device__ float processLine(int spos, float xmin, float xmax, int ixmin, int ixmax, float fxmin, float cxmax) { // first element float wsum = 1.0f - xmin + fxmin; float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin); spos++; for (int ix = ixmin + 1; ix < ixmax; ++ix) { sum += tex1Dfetch(texSrc, spos); spos++; wsum += 1.0f; } sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax); wsum += cxmax - xmax; return sum / wsum; } __global__ void resizeSuperSample_32f(NcvSize32u srcSize, Ncv32u srcStep, NcvRect32u srcROI, Ncv32f *dst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { // position within dst ROI const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } float rw = (float) srcROI.width; float rh = (float) srcROI.height; // source position float x = scaleX * (float) ix; float y = scaleY * (float) iy; // x sampling range float xBegin = fmax (x - scaleX, 0.0f); float xEnd = fmin (x + scaleX, rw - 1.0f); // y sampling range float yBegin = fmax (y - scaleY, 0.0f); float yEnd = fmin (y + scaleY, rh - 1.0f); // x range of source samples float floorXBegin = std::floor (xBegin); float ceilXEnd = std::ceil (xEnd); int iXBegin = srcROI.x + (int) floorXBegin; int iXEnd = srcROI.x + (int) ceilXEnd; // y range of source samples float floorYBegin = std::floor (yBegin); float ceilYEnd = std::ceil (yEnd); int iYBegin = srcROI.y + (int) floorYBegin; int iYEnd = srcROI.y + (int) ceilYEnd; // first row int pos = iYBegin * srcStep + iXBegin; float wsum = 1.0f - yBegin + floorYBegin; float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (1.0f - yBegin + floorYBegin); pos += srcStep; for (int iy = iYBegin + 1; iy < iYEnd; ++iy) { sum += processLine (pos, 
xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd); pos += srcStep; wsum += 1.0f; } sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (ceilYEnd - yEnd); wsum += ceilYEnd - yEnd; sum /= wsum; dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum; } // bicubic interpolation __forceinline__ __device__ float bicubicCoeff(float x_) { float x = std::abs(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } __global__ void resizeBicubic(NcvSize32u srcSize, NcvRect32u srcROI, NcvSize32u dstSize, Ncv32u dstStep, Ncv32f *dst, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } const float dx = 1.0f / srcROI.width; const float dy = 1.0f / srcROI.height; float rx = (float) srcROI.x; float ry = (float) srcROI.y; float rw = (float) srcROI.width; float rh = (float) srcROI.height; float x = scaleX * (float) ix; float y = scaleY * (float) iy; // sampling range // border mode is clamp float xmin = fmax (std::ceil (x - 2.0f), 0.0f); float xmax = fmin (std::floor (x + 2.0f), rw - 1.0f); float ymin = fmax (std::ceil (y - 2.0f), 0.0f); float ymax = fmin (std::floor (y + 2.0f), rh - 1.0f); // shift data window to match ROI rx += 0.5f; ry += 0.5f; x += rx; y += ry; xmin += rx; xmax += rx; ymin += ry; ymax += ry; float sum = 0.0f; float wsum = 0.0f; for (float cy = ymin; cy <= ymax; cy += 1.0f) { for (float cx = xmin; cx <= xmax; cx += 1.0f) { float xDist = x - cx; float yDist = y - cy; float wx = bicubicCoeff (xDist); float wy = bicubicCoeff (yDist); wx *= wy; sum += wx * tex2D (texSrc2D, cx * dx, cy * dy); wsum += wx; } } dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 
0 : sum / wsum; } NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, NcvRect32u srcROI, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u dstROI, Ncv32f xFactor, Ncv32f yFactor, NppStInterpMode interpolation) { NCVStatus status = NPPST_SUCCESS; ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE); ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width && nDstStep >= sizeof (Ncv32f) * (Ncv32f) dstSize.width, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // TODO: preprocess ROI to prevent out of bounds access if (interpolation == nppStSupersample) { // bind texture hipBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep); // invoke kernel dim3 ctaSize (32, 6); dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x, (dstROI.height + ctaSize.y - 1) / ctaSize.y); hipLaunchKernelGGL(( resizeSuperSample_32f) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else if (interpolation == nppStBicubic) { texSrc2D.addressMode[0] = hipAddressModeMirror; texSrc2D.addressMode[1] = hipAddressModeMirror; texSrc2D.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc <float> (); hipBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height, nSrcStep); dim3 ctaSize (32, 6); dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x, (dstSize.height + ctaSize.y - 1) / ctaSize.y); hipLaunchKernelGGL(( resizeBicubic) , dim3(gridSize), dim3(ctaSize), 0, nppStGetActiveCUDAstream (), srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else { status = NPPST_ERROR; } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return status; }
56898aa4cd96df565579a5d7261d6dd491499f5c.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved. * Third party copyrights are property of their respective owners. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id: $ * Ported to PCL by Koen Buys : Attention Work in progress! 
*/ #include <vector> #include <cuda_runtime.h> #include "NPP_staging.hpp" texture<Ncv8u, 1, cudaReadModeElementType> tex8u; texture<Ncv32u, 1, cudaReadModeElementType> tex32u; texture<uint2, 1, cudaReadModeElementType> tex64u; //============================================================================== // // CUDA streams handling // //============================================================================== static cudaStream_t nppStream = 0; cudaStream_t nppStGetActiveCUDAstream(void) { return nppStream; } cudaStream_t nppStSetActiveCUDAstream(cudaStream_t cudaStream) { cudaStream_t tmp = nppStream; nppStream = cudaStream; return tmp; } //============================================================================== // // BlockScan.cuh // //============================================================================== NCV_CT_ASSERT(K_WARP_SIZE == 32); //this is required for the manual unroll of the loop in warpScanInclusive //Almost the same as naive scan1Inclusive, but doesn't need __syncthreads() //assuming size <= WARP_SIZE and size is power of 2 template <class T> inline __device__ T warpScanInclusive(T idata, volatile T *s_Data) { Ncv32u pos = 2 * threadIdx.x - (threadIdx.x & (K_WARP_SIZE - 1)); s_Data[pos] = 0; pos += K_WARP_SIZE; s_Data[pos] = idata; //for(Ncv32u offset = 1; offset < K_WARP_SIZE; offset <<= 1) //{ // s_Data[pos] += s_Data[pos - offset]; //} s_Data[pos] += s_Data[pos - 1]; s_Data[pos] += s_Data[pos - 2]; s_Data[pos] += s_Data[pos - 4]; s_Data[pos] += s_Data[pos - 8]; s_Data[pos] += s_Data[pos - 16]; return s_Data[pos]; } template <class T> inline __device__ T warpScanExclusive(T idata, volatile T *s_Data) { return warpScanInclusive(idata, s_Data) - idata; } template <class T, Ncv32u tiNumScanThreads> inline __device__ T blockScanInclusive(T idata, volatile T *s_Data) { if (tiNumScanThreads > K_WARP_SIZE) { //Bottom-level inclusive warp scan T warpResult = warpScanInclusive(idata, s_Data); //Save top elements of each warp for 
exclusive warp scan //sync to wait for warp scans to complete (because s_Data is being overwritten) __syncthreads(); if( (threadIdx.x & (K_WARP_SIZE - 1)) == (K_WARP_SIZE - 1) ) { s_Data[threadIdx.x >> K_LOG2_WARP_SIZE] = warpResult; } //wait for warp scans to complete __syncthreads(); if( threadIdx.x < (tiNumScanThreads / K_WARP_SIZE) ) { //grab top warp elements T val = s_Data[threadIdx.x]; //calculate exclusive scan and write back to shared memory s_Data[threadIdx.x] = warpScanExclusive(val, s_Data); } //return updated warp scans with exclusive scan results __syncthreads(); return warpResult + s_Data[threadIdx.x >> K_LOG2_WARP_SIZE]; } else { return warpScanInclusive(idata, s_Data); } } //============================================================================== // // IntegralImage.cu // //============================================================================== const Ncv32u NUM_SCAN_THREADS = 256; const Ncv32u LOG2_NUM_SCAN_THREADS = 8; template<class T_in, class T_out> struct _scanElemOp { template<bool tbDoSqr> static inline __host__ __device__ T_out scanElemOp(T_in elem) { return scanElemOp( elem, Int2Type<(int)tbDoSqr>() ); } private: template <int v> struct Int2Type { enum { value = v }; }; static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<0>) { return (T_out)elem; } static inline __host__ __device__ T_out scanElemOp(T_in elem, Int2Type<1>) { return (T_out)(elem*elem); } }; template<class T> inline __device__ T readElem(T *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs); template<> inline __device__ Ncv8u readElem<Ncv8u>(Ncv8u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return tex1Dfetch(tex8u, texOffs + srcStride * blockIdx.x + curElemOffs); } template<> inline __device__ Ncv32u readElem<Ncv32u>(Ncv32u *d_src, Ncv32u texOffs, Ncv32u srcStride, Ncv32u curElemOffs) { return d_src[curElemOffs]; } template<> inline __device__ Ncv32f readElem<Ncv32f>(Ncv32f *d_src, Ncv32u texOffs, Ncv32u srcStride, 
Ncv32u curElemOffs) { return d_src[curElemOffs]; } /** * \brief Segmented scan kernel * * Calculates per-row prefix scans of the input image. * Out-of-bounds safe: reads 'size' elements, writes 'size+1' elements * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * \tparam T_op Defines an operation to be performed on the input image pixels * * \param d_src [IN] Source image pointer * \param srcWidth [IN] Source image width * \param srcStride [IN] Source image stride * \param d_II [OUT] Output image pointer * \param IIstride [IN] Output image stride * * \return None */ template <class T_in, class T_out, bool tbDoSqr> __global__ void scanRows(T_in *d_src, Ncv32u texOffs, Ncv32u srcWidth, Ncv32u srcStride, T_out *d_II, Ncv32u IIstride) { //advance pointers to the current line if (sizeof(T_in) != 1) { d_src += srcStride * blockIdx.x; } //for initial image 8bit source we use texref tex8u d_II += IIstride * blockIdx.x; Ncv32u numBuckets = (srcWidth + NUM_SCAN_THREADS - 1) >> LOG2_NUM_SCAN_THREADS; Ncv32u offsetX = 0; __shared__ T_out shmem[NUM_SCAN_THREADS * 2]; __shared__ T_out carryElem; carryElem = 0; __syncthreads(); while (numBuckets--) { Ncv32u curElemOffs = offsetX + threadIdx.x; T_out curScanElem; T_in curElem; T_out curElemMod; if (curElemOffs < srcWidth) { //load elements curElem = readElem<T_in>(d_src, texOffs, srcStride, curElemOffs); } curElemMod = _scanElemOp<T_in, T_out>::scanElemOp<tbDoSqr>(curElem); //inclusive scan curScanElem = blockScanInclusive<T_out, NUM_SCAN_THREADS>(curElemMod, shmem); if (curElemOffs <= srcWidth) { //make scan exclusive and write the bucket to the output buffer d_II[curElemOffs] = carryElem + curScanElem - curElemMod; offsetX += NUM_SCAN_THREADS; } //remember last element for subsequent buckets adjustment __syncthreads(); if (threadIdx.x == NUM_SCAN_THREADS-1) { carryElem += curScanElem; } __syncthreads(); } if (offsetX == srcWidth && !threadIdx.x) { d_II[offsetX] = carryElem; } } template 
<bool tbDoSqr, class T_in, class T_out> NCVStatus scanRowsWrapperDevice(T_in *d_src, Ncv32u srcStride, T_out *d_dst, Ncv32u dstStride, NcvSize32u roi) { cudaChannelFormatDesc cfdTex; size_t alignmentOffset = 0; if (sizeof(T_in) == 1) { cfdTex = cudaCreateChannelDesc<Ncv8u>(); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); if (alignmentOffset > 0) { ncvAssertCUDAReturn(cudaUnbindTexture(tex8u), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex8u, d_src, cfdTex, alignmentOffset + roi.height * srcStride), NPPST_TEXTURE_BIND_ERROR); } } scanRows <T_in, T_out, tbDoSqr> <<<roi.height, NUM_SCAN_THREADS, 0, nppStGetActiveCUDAstream()>>> (d_src, (Ncv32u)alignmentOffset, roi.width, srcStride, d_dst, dstStride); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } static Ncv32u getPaddedDimension(Ncv32u dim, Ncv32u elemTypeSize, Ncv32u allocatorAlignment) { Ncv32u alignMask = allocatorAlignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u dimBytes = dim * elemTypeSize; Ncv32u pitch = (dimBytes + alignMask) & inverseAlignMask; Ncv32u PaddedDim = pitch / elemTypeSize; return PaddedDim; } template <class T_in, class T_out> NCVStatus ncvIntegralImage_device(T_in *d_src, Ncv32u srcStep, T_out *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(sizeof(T_out) == sizeof(Ncv32u), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width * sizeof(T_in) && dstStep >= (roi.width + 1) * sizeof(T_out) && srcStep % 
sizeof(T_in) == 0 && dstStep % sizeof(T_out) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T_in); dstStep /= sizeof(T_out); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); NCVMatrixAlloc<T_out> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<T_out> Tmp32_2(gpuAllocator, PaddedHeightII32, PaddedWidthII32); ncvAssertReturn(gpuAllocator.isCounting() || Tmp32_2.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(Tmp32_1.pitch() * Tmp32_1.height() == Tmp32_2.pitch() * Tmp32_2.height(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <false> (d_src, srcStep, Tmp32_1.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedWidthII32*sizeof(Ncv32u), (Ncv32u *)Tmp32_2.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false> (Tmp32_2.ptr(), PaddedHeightII32, Tmp32_1.ptr(), PaddedHeightII32, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R((Ncv32u *)Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), (Ncv32u *)d_dst, dstStep*sizeof(Ncv32u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus ncvSquaredIntegralImage_device(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roi, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn(gpuAllocator.memType() == NCVMemoryTypeDevice || 
gpuAllocator.memType() == NCVMemoryTypeNone, NPPST_MEM_RESIDENCE_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roi.width && dstStep >= (roi.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roi.width + 1; Ncv32u HeightII = roi.height + 1; Ncv32u PaddedWidthII32 = getPaddedDimension(WidthII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedHeightII32 = getPaddedDimension(HeightII, sizeof(Ncv32u), gpuAllocator.alignment()); Ncv32u PaddedWidthII64 = getPaddedDimension(WidthII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedHeightII64 = getPaddedDimension(HeightII, sizeof(Ncv64u), gpuAllocator.alignment()); Ncv32u PaddedWidthMax = PaddedWidthII32 > PaddedWidthII64 ? PaddedWidthII32 : PaddedWidthII64; Ncv32u PaddedHeightMax = PaddedHeightII32 > PaddedHeightII64 ? 
PaddedHeightII32 : PaddedHeightII64; NCVMatrixAlloc<Ncv32u> Tmp32_1(gpuAllocator, PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_1.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixAlloc<Ncv64u> Tmp64(gpuAllocator, PaddedWidthMax, PaddedHeightMax); ncvAssertReturn(Tmp64.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv32u> Tmp32_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII32, PaddedHeightII32); ncvAssertReturn(Tmp32_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVMatrixReuse<Ncv64u> Tmp64_2(Tmp64.getSegment(), gpuAllocator.alignment(), PaddedWidthII64, PaddedHeightII64); ncvAssertReturn(Tmp64_2.isMemReused(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN ncvStat = scanRowsWrapperDevice <true, Ncv8u, Ncv32u> (d_src, srcStep, Tmp32_2.ptr(), PaddedWidthII32, roi); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_32u_C1R(Tmp32_2.ptr(), PaddedWidthII32*sizeof(Ncv32u), Tmp32_1.ptr(), PaddedHeightII32*sizeof(Ncv32u), NcvSize32u(WidthII, roi.height)); ncvAssertReturnNcvStat(ncvStat); ncvStat = scanRowsWrapperDevice <false, Ncv32u, Ncv64u> (Tmp32_1.ptr(), PaddedHeightII32, Tmp64_2.ptr(), PaddedHeightII64, NcvSize32u(roi.height, WidthII)); ncvAssertReturnNcvStat(ncvStat); ncvStat = nppiStTranspose_64u_C1R(Tmp64_2.ptr(), PaddedHeightII64*sizeof(Ncv64u), d_dst, dstStep*sizeof(Ncv64u), NcvSize32u(HeightII, WidthII)); ncvAssertReturnNcvStat(ncvStat); NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_8u32u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv8u*)NULL, roiSize.width, 
(Ncv32u*)NULL, (roiSize.width+1) * sizeof(Ncv32u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegralGetSize_32f32f(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device((Ncv32f*)NULL, roiSize.width * sizeof(Ncv32f), (Ncv32f*)NULL, (roiSize.width+1) * sizeof(Ncv32f), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegralGetSize_8u64u(NcvSize32u roiSize, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(NULL, roiSize.width, NULL, (roiSize.width+1) * sizeof(Ncv64u), roiSize, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv32u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R(Ncv32f *d_src, Ncv32u srcStep, Ncv32f *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R(Ncv8u *d_src, Ncv32u srcStep, Ncv64u *d_dst, Ncv32u dstStep, NcvSize32u roiSize, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = ncvSquaredIntegralImage_device(d_src, srcStep, d_dst, dstStep, roiSize, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppiStIntegral_8u32u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv32u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv32u) && dstStep % sizeof(Ncv32u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv32u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv32u top = h_dst[(i-1) * dstStep + j]; Ncv32u left = h_dst[i * dstStep + (j - 1)]; Ncv32u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus 
nppiStIntegral_32f32f_C1R_host(Ncv32f *h_src, Ncv32u srcStep, Ncv32f *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width * sizeof(Ncv32f) && dstStep >= (roiSize.width + 1) * sizeof(Ncv32f) && srcStep % sizeof(Ncv32f) == 0 && dstStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(Ncv32f); dstStep /= sizeof(Ncv32f); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv32u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0.0f; for (Ncv32u j=1; j<WidthII; j++) { Ncv32f top = h_dst[(i-1) * dstStep + j]; Ncv32f left = h_dst[i * dstStep + (j - 1)]; Ncv32f topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv32f elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem + left - topleft + top; } } return NPPST_SUCCESS; } NCVStatus nppiStSqrIntegral_8u64u_C1R_host(Ncv8u *h_src, Ncv32u srcStep, Ncv64u *h_dst, Ncv32u dstStep, NcvSize32u roiSize) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roiSize.width > 0 && roiSize.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStep >= roiSize.width && dstStep >= (roiSize.width + 1) * sizeof(Ncv64u) && dstStep % sizeof(Ncv64u) == 0, NPPST_INVALID_STEP); dstStep /= sizeof(Ncv64u); Ncv32u WidthII = roiSize.width + 1; Ncv32u HeightII = roiSize.height + 1; memset(h_dst, 0, WidthII * sizeof(Ncv64u)); for (Ncv32u i=1; i<HeightII; i++) { h_dst[i * dstStep] = 0; for (Ncv32u j=1; j<WidthII; j++) { Ncv64u top = h_dst[(i-1) * dstStep + j]; Ncv64u left = h_dst[i * dstStep + (j - 1)]; Ncv64u topleft = h_dst[(i - 1) * dstStep + (j - 1)]; Ncv64u elem = h_src[(i - 1) * srcStep + (j - 1)]; h_dst[i * dstStep + j] = elem*elem + left - topleft + top; } } return NPPST_SUCCESS; } 
//============================================================================== // // Decimate.cu // //============================================================================== const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_X = 32; const Ncv32u NUM_DOWNSAMPLE_NEAREST_THREADS_Y = 8; template<class T, NcvBool tbCacheTexture> __device__ T getElem_Decimate(Ncv32u x, T *d_src); template<> __device__ Ncv32u getElem_Decimate<Ncv32u, true>(Ncv32u x, Ncv32u *d_src) { return tex1Dfetch(tex32u, x); } template<> __device__ Ncv32u getElem_Decimate<Ncv32u, false>(Ncv32u x, Ncv32u *d_src) { return d_src[x]; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, true>(Ncv32u x, Ncv64u *d_src) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } template<> __device__ Ncv64u getElem_Decimate<Ncv64u, false>(Ncv32u x, Ncv64u *d_src) { return d_src[x]; } template <class T, NcvBool tbCacheTexture> __global__ void decimate_C1R(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u dstRoi, Ncv32u scale) { int curX = blockIdx.x * blockDim.x + threadIdx.x; int curY = blockIdx.y * blockDim.y + threadIdx.y; if (curX >= dstRoi.width || curY >= dstRoi.height) { return; } d_dst[curY * dstStep + curX] = getElem_Decimate<T, tbCacheTexture>((curY * srcStep + curX) * scale, d_src); } template <class T> static NCVStatus decimateWrapperDevice(T *d_src, Ncv32u srcStep, T *d_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale), NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; dim3 
grid((dstRoi.width + NUM_DOWNSAMPLE_NEAREST_THREADS_X - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_X, (dstRoi.height + NUM_DOWNSAMPLE_NEAREST_THREADS_Y - 1) / NUM_DOWNSAMPLE_NEAREST_THREADS_Y); dim3 block(NUM_DOWNSAMPLE_NEAREST_THREADS_X, NUM_DOWNSAMPLE_NEAREST_THREADS_Y); if (!readThruTexture) { decimate_C1R <T, false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStep, d_dst, dstStep, dstRoi, scale); } else { cudaChannelFormatDesc cfdTexSrc; if (sizeof(T) == sizeof(Ncv32u)) { cfdTexSrc = cudaCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } else { cfdTexSrc = cudaCreateChannelDesc<uint2>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_src, cfdTexSrc, srcRoi.height * srcStep * sizeof(T)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); } decimate_C1R <T, true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStep, d_dst, dstStep, dstRoi, scale); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus decimateWrapperHost(T *h_src, Ncv32u srcStep, T *h_dst, Ncv32u dstStep, NcvSize32u srcRoi, Ncv32u scale) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width != 0 && srcRoi.height != 0, NPPST_INVALID_ROI); ncvAssertReturn(scale != 0, NPPST_INVALID_SCALE); ncvAssertReturn(srcStep >= (Ncv32u)(srcRoi.width) * sizeof(T) && dstStep >= (Ncv32u)(srcRoi.width * sizeof(T) / scale) && srcStep % sizeof(T) == 0 && dstStep % sizeof(T) == 0, NPPST_INVALID_STEP); srcStep /= sizeof(T); dstStep /= sizeof(T); NcvSize32u dstRoi; dstRoi.width = srcRoi.width / scale; dstRoi.height = srcRoi.height / scale; for (Ncv32u i=0; i<dstRoi.height; i++) { for (Ncv32u 
j=0; j<dstRoi.width; j++) { h_dst[i*dstStep+j] = h_src[i*scale*srcStep + j*scale]; } } return NPPST_SUCCESS; } #define implementNppDecimate(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale, NcvBool readThruTexture) \ { \ return decimateWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, \ srcRoi, scale, readThruTexture); \ } #define implementNppDecimateHost(bit, typ) \ NCVStatus nppiStDecimate_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi, Ncv32u scale) \ { \ return decimateWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, \ srcRoi, scale); \ } implementNppDecimate(32, u) implementNppDecimate(32, s) implementNppDecimate(32, f) implementNppDecimate(64, u) implementNppDecimate(64, s) implementNppDecimate(64, f) implementNppDecimateHost(32, u) implementNppDecimateHost(32, s) implementNppDecimateHost(32, f) implementNppDecimateHost(64, u) implementNppDecimateHost(64, s) implementNppDecimateHost(64, f) //============================================================================== // // RectStdDev.cu // //============================================================================== const Ncv32u NUM_RECTSTDDEV_THREADS = 128; template <NcvBool tbCacheTexture> __device__ Ncv32u getElemSum(Ncv32u x, Ncv32u *d_sum) { if (tbCacheTexture) { return tex1Dfetch(tex32u, x); } else { return d_sum[x]; } } template <NcvBool tbCacheTexture> __device__ Ncv64u getElemSqSum(Ncv32u x, Ncv64u *d_sqsum) { if (tbCacheTexture) { uint2 tmp = tex1Dfetch(tex64u, x); Ncv64u res = (Ncv64u)tmp.y; res <<= 32; res |= tmp.x; return res; } else { return d_sqsum[x]; } } template <NcvBool tbCacheTexture> __global__ void rectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, 
NcvSize32u roi, NcvRect32u rect, Ncv32f invRectArea) { Ncv32u x_offs = blockIdx.x * NUM_RECTSTDDEV_THREADS + threadIdx.x; if (x_offs >= roi.width) { return; } Ncv32u sum_offset = blockIdx.y * sumStep + x_offs; Ncv32u sqsum_offset = blockIdx.y * sqsumStep + x_offs; //OPT: try swapping order (could change cache hit/miss ratio) Ncv32u sum_tl = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x, d_sum); Ncv32u sum_bl = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x, d_sum); Ncv32u sum_tr = getElemSum<tbCacheTexture>(sum_offset + rect.y * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_br = getElemSum<tbCacheTexture>(sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width, d_sum); Ncv32u sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl, sqsum_bl, sqsum_tr, sqsum_br; sqsum_tl = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x, d_sqsum); sqsum_bl = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x, d_sqsum); sqsum_tr = getElemSqSum<tbCacheTexture>(sqsum_offset + rect.y * sqsumStep + rect.x + rect.width, d_sqsum); sqsum_br = getElemSqSum<tbCacheTexture>(sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width, d_sqsum); Ncv64u sqsum_val = sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl; Ncv32f mean = sum_val * invRectArea; ////////////////////////////////////////////////////////////////////////// // sqsum_val_res = sqsum_val / rectArea ////////////////////////////////////////////////////////////////////////// Ncv32f sqsum_val_1 = __ull2float_rz(sqsum_val); Ncv64u sqsum_val_2 = __float2ull_rz(sqsum_val_1); Ncv64u sqsum_val_3 = sqsum_val - sqsum_val_2; Ncv32f sqsum_val_4 = __ull2float_rn(sqsum_val_3); sqsum_val_1 *= invRectArea; sqsum_val_4 *= invRectArea; Ncv32f sqsum_val_res = sqsum_val_1 + sqsum_val_4; ////////////////////////////////////////////////////////////////////////// // variance = sqsum_val_res - mean * mean 
////////////////////////////////////////////////////////////////////////// #if defined DISABLE_MAD_SELECTIVELY Ncv32f variance = sqsum_val_2 - __fmul_rn(mean, mean); #else Ncv32f variance = sqsum_val_res - mean * mean; #endif ////////////////////////////////////////////////////////////////////////// // stddev = sqrtf(variance) ////////////////////////////////////////////////////////////////////////// //Ncv32f stddev = sqrtf(variance); Ncv32f stddev = __fsqrt_rn(variance); d_norm[blockIdx.y * normStep + x_offs] = stddev; } NCVStatus nppiStRectStdDev_32f_C1R(Ncv32u *d_sum, Ncv32u sumStep, Ncv64u *d_sqsum, Ncv32u sqsumStep, Ncv32f *d_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea, NcvBool readThruTexture) { ncvAssertReturn(d_sum != NULL && d_sqsum != NULL && d_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; dim3 grid(((roi.width + NUM_RECTSTDDEV_THREADS - 1) / NUM_RECTSTDDEV_THREADS), roi.height); dim3 block(NUM_RECTSTDDEV_THREADS); if (!readThruTexture) { rectStdDev_32f_C1R <false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_sum, sumStep, d_sqsum, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } else { cudaChannelFormatDesc cfdTexSrc; cudaChannelFormatDesc cfdTexSqr; cfdTexSrc = cudaCreateChannelDesc<Ncv32u>(); cfdTexSqr = cudaCreateChannelDesc<uint2>(); size_t alignmentOffset; 
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex32u, d_sum, cfdTexSrc, (roi.height + rect.y + rect.height) * sumStep * sizeof(Ncv32u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, tex64u, d_sqsum, cfdTexSqr, (roi.height + rect.y + rect.height) * sqsumStep * sizeof(Ncv64u)), NPPST_TEXTURE_BIND_ERROR); ncvAssertReturn(alignmentOffset==0, NPPST_TEXTURE_BIND_ERROR); rectStdDev_32f_C1R <true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (NULL, sumStep, NULL, sqsumStep, d_norm, normStep, roi, rect, invRectArea); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStRectStdDev_32f_C1R_host(Ncv32u *h_sum, Ncv32u sumStep, Ncv64u *h_sqsum, Ncv32u sqsumStep, Ncv32f *h_norm, Ncv32u normStep, NcvSize32u roi, NcvRect32u rect, Ncv32f scaleArea) { ncvAssertReturn(h_sum != NULL && h_sqsum != NULL && h_norm != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(roi.width > 0 && roi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(sumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv32u) && sqsumStep >= (Ncv32u)(roi.width + rect.x + rect.width - 1) * sizeof(Ncv64u) && normStep >= (Ncv32u)roi.width * sizeof(Ncv32f) && sumStep % sizeof(Ncv32u) == 0 && sqsumStep % sizeof(Ncv64u) == 0 && normStep % sizeof(Ncv32f) == 0, NPPST_INVALID_STEP); ncvAssertReturn(scaleArea >= 1.0f, NPPST_INVALID_SCALE); sumStep /= sizeof(Ncv32u); sqsumStep /= sizeof(Ncv64u); normStep /= sizeof(Ncv32f); Ncv32f rectArea = rect.width * rect.height * scaleArea; Ncv32f invRectArea = 1.0f / rectArea; for (Ncv32u i=0; i<roi.height; i++) { for (Ncv32u j=0; j<roi.width; j++) { Ncv32u sum_offset = i * sumStep + j; Ncv32u sqsum_offset = i * sqsumStep + j; Ncv32u sum_tl = h_sum[sum_offset + rect.y * sumStep + rect.x]; Ncv32u sum_bl = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x]; Ncv32u sum_tr = h_sum[sum_offset + rect.y * sumStep + 
rect.x + rect.width]; Ncv32u sum_br = h_sum[sum_offset + (rect.y + rect.height) * sumStep + rect.x + rect.width]; Ncv64f sum_val = sum_br + sum_tl - sum_tr - sum_bl; Ncv64u sqsum_tl = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x]; Ncv64u sqsum_bl = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x]; Ncv64u sqsum_tr = h_sqsum[sqsum_offset + rect.y * sqsumStep + rect.x + rect.width]; Ncv64u sqsum_br = h_sqsum[sqsum_offset + (rect.y + rect.height) * sqsumStep + rect.x + rect.width]; Ncv64f sqsum_val = (Ncv64f)(sqsum_br + sqsum_tl - sqsum_tr - sqsum_bl); Ncv64f mean = sum_val * invRectArea; Ncv64f sqsum_val_2 = sqsum_val / rectArea; Ncv64f variance = sqsum_val_2 - mean * mean; h_norm[i * normStep + j] = (Ncv32f)sqrt(variance); } } return NPPST_SUCCESS; } //============================================================================== // // Transpose.cu // //============================================================================== const Ncv32u TRANSPOSE_TILE_DIM = 16; const Ncv32u TRANSPOSE_BLOCK_ROWS = 16; /** * \brief Matrix transpose kernel * * Calculates transpose of the input image * \see TRANSPOSE_TILE_DIM * * \tparam T_in Type of input image elements * \tparam T_out Type of output image elements * * \param d_src [IN] Source image pointer * \param srcStride [IN] Source image stride * \param d_dst [OUT] Output image pointer * \param dstStride [IN] Output image stride * * \return None */ template <class T> __global__ void transpose(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { __shared__ T tile[TRANSPOSE_TILE_DIM][TRANSPOSE_TILE_DIM+1]; Ncv32u blockIdx_x, blockIdx_y; // do diagonal reordering if (gridDim.x == gridDim.y) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x; } else { Ncv32u bid = blockIdx.x + gridDim.x * blockIdx.y; blockIdx_y = bid % gridDim.y; blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x; } Ncv32u xIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.x; 
Ncv32u yIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.y; Ncv32u index_gmem = xIndex + yIndex * srcStride; if (xIndex < srcRoi.width) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.height) { tile[threadIdx.y+i][threadIdx.x] = d_src[index_gmem+i*srcStride]; } } } __syncthreads(); xIndex = blockIdx_y * TRANSPOSE_TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TRANSPOSE_TILE_DIM + threadIdx.y; index_gmem = xIndex + yIndex * dstStride; if (xIndex < srcRoi.height) { for (Ncv32u i=0; i<TRANSPOSE_TILE_DIM; i+=TRANSPOSE_BLOCK_ROWS) { if (yIndex + i < srcRoi.width) { d_dst[index_gmem+i*dstStride] = tile[threadIdx.x][threadIdx.y+i]; } } } } template <class T> NCVStatus transposeWrapperDevice(T *d_src, Ncv32u srcStride, T *d_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(d_src != NULL && d_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); dim3 grid((srcRoi.width + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM, (srcRoi.height + TRANSPOSE_TILE_DIM - 1) / TRANSPOSE_TILE_DIM); dim3 block(TRANSPOSE_TILE_DIM, TRANSPOSE_TILE_DIM); transpose <T> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcStride, d_dst, dstStride, srcRoi); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } template <class T> static NCVStatus transposeWrapperHost(T *h_src, Ncv32u srcStride, T *h_dst, Ncv32u dstStride, NcvSize32u srcRoi) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0, NPPST_INVALID_ROI); ncvAssertReturn(srcStride >= srcRoi.width * sizeof(T) && dstStride >= srcRoi.height * sizeof(T) && srcStride % sizeof(T) == 0 && 
dstStride % sizeof(T) == 0, NPPST_INVALID_STEP); srcStride /= sizeof(T); dstStride /= sizeof(T); for (Ncv32u i=0; i<srcRoi.height; i++) { for (Ncv32u j=0; j<srcRoi.width; j++) { h_dst[j*dstStride+i] = h_src[i*srcStride + j]; } } return NPPST_SUCCESS; } #define implementNppTranspose(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R(Ncv##bit##typ *d_src, Ncv32u srcStep, \ Ncv##bit##typ *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) \ { \ return transposeWrapperDevice<Ncv##bit##u>((Ncv##bit##u *)d_src, srcStep, \ (Ncv##bit##u *)d_dst, dstStep, srcRoi); \ } #define implementNppTransposeHost(bit, typ) \ NCVStatus nppiStTranspose_##bit##typ##_C1R_host(Ncv##bit##typ *h_src, Ncv32u srcStep, \ Ncv##bit##typ *h_dst, Ncv32u dstStep, \ NcvSize32u srcRoi) \ { \ return transposeWrapperHost<Ncv##bit##u>((Ncv##bit##u *)h_src, srcStep, \ (Ncv##bit##u *)h_dst, dstStep, srcRoi); \ } implementNppTranspose(32,u) implementNppTranspose(32,s) implementNppTranspose(32,f) implementNppTranspose(64,u) implementNppTranspose(64,s) implementNppTranspose(64,f) implementNppTransposeHost(32,u) implementNppTransposeHost(32,s) implementNppTransposeHost(32,f) implementNppTransposeHost(64,u) implementNppTransposeHost(64,s) implementNppTransposeHost(64,f) NCVStatus nppiStTranspose_128_C1R(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperDevice<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } NCVStatus nppiStTranspose_128_C1R_host(void *d_src, Ncv32u srcStep, void *d_dst, Ncv32u dstStep, NcvSize32u srcRoi) { return transposeWrapperHost<uint4>((uint4 *)d_src, srcStep, (uint4 *)d_dst, dstStep, srcRoi); } //============================================================================== // // Compact.cu // //============================================================================== const Ncv32u NUM_REMOVE_THREADS = 256; template <bool bRemove, bool bWritePartial> __global__ void removePass1Scan(Ncv32u *d_src, Ncv32u srcLen, Ncv32u 
*d_offsets, Ncv32u *d_blockSums, Ncv32u elemRemove) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn > srcLen + blockDim.x) { return; } __shared__ Ncv32u shmem[NUM_REMOVE_THREADS * 2]; Ncv32u scanElem = 0; if (elemAddrIn < srcLen) { if (bRemove) { scanElem = (d_src[elemAddrIn] != elemRemove) ? 1 : 0; } else { scanElem = d_src[elemAddrIn]; } } Ncv32u localScanInc = blockScanInclusive<Ncv32u, NUM_REMOVE_THREADS>(scanElem, shmem); __syncthreads(); if (elemAddrIn < srcLen) { if (threadIdx.x == NUM_REMOVE_THREADS-1 && bWritePartial) { d_blockSums[blockId] = localScanInc; } if (bRemove) { d_offsets[elemAddrIn] = localScanInc - scanElem; } else { d_src[elemAddrIn] = localScanInc - scanElem; } } } __global__ void removePass2Adjust(Ncv32u *d_offsets, Ncv32u srcLen, Ncv32u *d_blockSums) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } __shared__ Ncv32u valOffs; valOffs = d_blockSums[blockId]; __syncthreads(); d_offsets[elemAddrIn] += valOffs; } __global__ void removePass3Compact(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_offsets, Ncv32u *d_dst, Ncv32u elemRemove, Ncv32u *dstLenValue) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddrIn = blockId * NUM_REMOVE_THREADS + threadIdx.x; if (elemAddrIn >= srcLen) { return; } Ncv32u elem = d_src[elemAddrIn]; Ncv32u elemAddrOut = d_offsets[elemAddrIn]; if (elem != elemRemove) { d_dst[elemAddrOut] = elem; } if (elemAddrIn == srcLen-1) { if (elem != elemRemove) { *dstLenValue = elemAddrOut + 1; } else { *dstLenValue = elemAddrOut; } } } NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *dstLenPinned, Ncv32u elemRemove, INCVMemAllocator &gpuAllocator) { ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); ncvAssertReturn((d_src != NULL && d_dst != NULL) || gpuAllocator.isCounting(), 
NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLenPinned != NULL) { *dstLenPinned = 0; } return NPPST_SUCCESS; } std::vector<Ncv32u> partSumNums; std::vector<Ncv32u> partSumOffsets; Ncv32u partSumLastNum = srcLen; Ncv32u partSumLastOffs = 0; do { partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); Ncv32u curPartSumAlignedLength = alignUp(partSumLastNum * sizeof(Ncv32u), gpuAllocator.alignment()) / sizeof(Ncv32u); partSumLastOffs += curPartSumAlignedLength; partSumLastNum = (partSumLastNum + NUM_REMOVE_THREADS - 1) / NUM_REMOVE_THREADS; } while (partSumLastNum>1); partSumNums.push_back(partSumLastNum); partSumOffsets.push_back(partSumLastOffs); NCVVectorAlloc<Ncv32u> d_hierSums(gpuAllocator, partSumLastOffs+1); ncvAssertReturn(gpuAllocator.isCounting() || d_hierSums.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCVVectorAlloc<Ncv32u> d_numDstElements(gpuAllocator, 1); ncvAssertReturn(gpuAllocator.isCounting() || d_numDstElements.isMemAllocated(), NPPST_MEM_INTERNAL_ERROR); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); NCV_SKIP_COND_BEGIN dim3 block(NUM_REMOVE_THREADS); //calculate zero-level partial sums for indices calculation if (partSumNums.size() > 2) { dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } removePass1Scan <true, true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), d_hierSums.ptr() + partSumOffsets[1], elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //calculate hierarchical partial sums for (Ncv32u i=1; i<partSumNums.size()-1; i++) { dim3 grid(partSumNums[i+1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } if (grid.x != 1) { removePass1Scan <false, true> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, d_hierSums.ptr() + partSumOffsets[i+1], 0); } else { removePass1Scan <false, false> <<<grid, block, 0, 
nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], NULL, NULL, 0); } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //adjust hierarchical partial sums for (Ncv32s i=(Ncv32s)partSumNums.size()-3; i>=0; i--) { dim3 grid(partSumNums[i+1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } removePass2Adjust <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_hierSums.ptr() + partSumOffsets[i], partSumNums[i], d_hierSums.ptr() + partSumOffsets[i+1]); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } } else { dim3 grid(partSumNums[1]); removePass1Scan <true, false> <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), NULL, elemRemove); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); } //compact source vector using indices dim3 grid(partSumNums[1]); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } removePass3Compact <<<grid, block, 0, nppStGetActiveCUDAstream()>>> (d_src, srcLen, d_hierSums.ptr(), d_dst, elemRemove, d_numDstElements.ptr()); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); //get number of dst elements if (dstLenPinned != NULL) { ncvAssertCUDAReturn(cudaMemcpyAsync(dstLenPinned, d_numDstElements.ptr(), sizeof(Ncv32u), cudaMemcpyDeviceToHost, nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(nppStGetActiveCUDAstream()), NPPST_MEM_RESIDENCE_ERROR); } NCV_SKIP_COND_END return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32u(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { ncvAssertReturn(pBufsize != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { *pBufsize = 0; return NPPST_SUCCESS; } NCVMemStackAllocator gpuCounter(static_cast<Ncv32u>(devProp.textureAlignment)); ncvAssertReturn(gpuCounter.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(NULL, srcLen, NULL, NULL, 
0xC001C0DE, gpuCounter); ncvAssertReturnNcvStat(ncvStat); *pBufsize = (Ncv32u)gpuCounter.maxSize(); return NPPST_SUCCESS; } NCVStatus nppsStCompactGetSize_32s(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompactGetSize_32f(Ncv32u srcLen, Ncv32u *pBufsize, cudaDeviceProp &devProp) { return nppsStCompactGetSize_32u(srcLen, pBufsize, devProp); } NCVStatus nppsStCompact_32u(Ncv32u *d_src, Ncv32u srcLen, Ncv32u *d_dst, Ncv32u *p_dstLen, Ncv32u elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { NCVMemStackAllocator gpuAllocator(NCVMemoryTypeDevice, bufSize, static_cast<Ncv32u>(devProp.textureAlignment), pBuffer); ncvAssertReturn(gpuAllocator.isInitialized(), NPPST_MEM_INTERNAL_ERROR); NCVStatus ncvStat = compactVector_32u_device(d_src, srcLen, d_dst, p_dstLen, elemRemove, gpuAllocator); ncvAssertReturnNcvStat(ncvStat); return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s(Ncv32s *d_src, Ncv32u srcLen, Ncv32s *d_dst, Ncv32u *p_dstLen, Ncv32s elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp); } NCVStatus nppsStCompact_32f(Ncv32f *d_src, Ncv32u srcLen, Ncv32f *d_dst, Ncv32u *p_dstLen, Ncv32f elemRemove, Ncv8u *pBuffer, Ncv32u bufSize, cudaDeviceProp &devProp) { return nppsStCompact_32u((Ncv32u *)d_src, srcLen, (Ncv32u *)d_dst, p_dstLen, *(Ncv32u *)&elemRemove, pBuffer, bufSize, devProp); } NCVStatus nppsStCompact_32u_host(Ncv32u *h_src, Ncv32u srcLen, Ncv32u *h_dst, Ncv32u *dstLen, Ncv32u elemRemove) { ncvAssertReturn(h_src != NULL && h_dst != NULL, NPPST_NULL_POINTER_ERROR); if (srcLen == 0) { if (dstLen != NULL) { *dstLen = 0; } return NPPST_SUCCESS; } Ncv32u dstIndex = 0; for (Ncv32u srcIndex=0; srcIndex<srcLen; srcIndex++) { if (h_src[srcIndex] != elemRemove) { h_dst[dstIndex++] = h_src[srcIndex]; } } if (dstLen != 
NULL) { *dstLen = dstIndex; } return NPPST_SUCCESS; } NCVStatus nppsStCompact_32s_host(Ncv32s *h_src, Ncv32u srcLen, Ncv32s *h_dst, Ncv32u *dstLen, Ncv32s elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u *)&elemRemove); } NCVStatus nppsStCompact_32f_host(Ncv32f *h_src, Ncv32u srcLen, Ncv32f *h_dst, Ncv32u *dstLen, Ncv32f elemRemove) { return nppsStCompact_32u_host((Ncv32u *)h_src, srcLen, (Ncv32u *)h_dst, dstLen, *(Ncv32u *)&elemRemove); } //============================================================================== // // Filter.cu // //============================================================================== texture <float, 1, cudaReadModeElementType> texSrc; texture <float, 1, cudaReadModeElementType> texKernel; __forceinline__ __device__ float getValueMirrorRow(const int rowOffset, int i, int w) { if (i < 0) i = 1 - i; if (i >= w) i = w + w - i - 1; return tex1Dfetch (texSrc, rowOffset + i); } __forceinline__ __device__ float getValueMirrorColumn(const int offset, const int rowStep, int j, int h) { if (j < 0) j = 1 - j; if (j >= h) j = h + h - j - 1; return tex1Dfetch (texSrc, offset + j * rowStep); } __global__ void FilterRowBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { // position within ROI const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int j = roi.y + iy; const int rowOffset = j * srcStep + roi.x; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorRow (rowOffset, ix + m - p, roi.width) * tex1Dfetch (texKernel, m); } pDst[iy * dstStep + ix] = sum * multiplier; } __global__ void FilterColumnBorderMirror_32f_C1R(Ncv32u srcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u roi, Ncv32s 
nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; if (ix >= roi.width || iy >= roi.height) { return; } const int p = nKernelSize - nAnchor - 1; const int i = roi.x + ix; const int offset = i + roi.y * srcStep; float sum = 0.0f; for (int m = 0; m < nKernelSize; ++m) { sum += getValueMirrorColumn (offset, srcStep, iy + m - p, roi.height) * tex1Dfetch (texKernel, m); } pDst[ix + iy * dstStep] = sum * multiplier; } NCVStatus nppiStFilterRowBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderNone: 
return NPPST_ERROR; case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: FilterRowBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } NCVStatus nppiStFilterColumnBorder_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u oROI, NppStBorderType borderType, const Ncv32f *pKernel, Ncv32s nKernelSize, Ncv32s nAnchor, Ncv32f multiplier) { ncvAssertReturn (pSrc != NULL && pDst != NULL && pKernel != NULL, NCV_NULL_PTR); ncvAssertReturn (oROI.width > 0 && oROI.height > 0, NPPST_INVALID_ROI); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && dstSize.width * sizeof (Ncv32f) <= nDstStep && oROI.width * sizeof (Ncv32f) <= nSrcStep && oROI.width * sizeof (Ncv32f) <= nDstStep && nSrcStep % sizeof (Ncv32f) == 0 && nDstStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // adjust ROI size to be within source image if (oROI.x + oROI.width > srcSize.width) { oROI.width = srcSize.width - oROI.x; } if (oROI.y + oROI.height > srcSize.height) { oROI.height = srcSize.height - oROI.y; } cudaChannelFormatDesc floatChannel = cudaCreateChannelDesc <float> (); texSrc.normalized = false; texKernel.normalized = false; cudaBindTexture (0, texSrc, pSrc, floatChannel, srcSize.height * nSrcStep); cudaBindTexture (0, texKernel, pKernel, floatChannel, nKernelSize * sizeof (Ncv32f)); dim3 ctaSize (32, 6); dim3 gridSize ((oROI.width + ctaSize.x - 1) / ctaSize.x, (oROI.height + ctaSize.y - 1) / ctaSize.y); switch (borderType) { case nppStBorderClamp: return NPPST_ERROR; case nppStBorderWrap: return NPPST_ERROR; case nppStBorderMirror: 
FilterColumnBorderMirror_32f_C1R <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcStep, pDst, dstSize, dstStep, oROI, nKernelSize, nAnchor, multiplier); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); break; default: return NPPST_ERROR; } return NPPST_SUCCESS; } //============================================================================== // // FrameInterpolate.cu // //============================================================================== inline Ncv32u iDivUp(Ncv32u num, Ncv32u denom) { return (num + denom - 1)/denom; } texture<float, 2, cudaReadModeElementType> tex_src1; texture<float, 2, cudaReadModeElementType> tex_src0; __global__ void BlendFramesKernel(const float *u, const float *v, // forward flow const float *ur, const float *vr, // backward flow const float *o0, const float *o1, // coverage masks int w, int h, int s, float theta, float *out) { const int ix = threadIdx.x + blockDim.x * blockIdx.x; const int iy = threadIdx.y + blockDim.y * blockIdx.y; const int pos = ix + s * iy; if (ix >= w || iy >= h) return; float _u = u[pos]; float _v = v[pos]; float _ur = ur[pos]; float _vr = vr[pos]; float x = (float)ix + 0.5f; float y = (float)iy + 0.5f; bool b0 = o0[pos] > 1e-4f; bool b1 = o1[pos] > 1e-4f; if (b0 && b1) { // pixel is visible on both frames out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta) * (1.0f - theta) + tex2D(tex_src1, x + _u * (1.0f - theta), y + _v * (1.0f - theta)) * theta; } else if (b0) { // visible on the first frame only out[pos] = tex2D(tex_src0, x - _u * theta, y - _v * theta); } else { // visible on the second frame only out[pos] = tex2D(tex_src1, x - _ur * (1.0f - theta), y - _vr * (1.0f - theta)); } } NCVStatus BlendFrames(const Ncv32f *src0, const Ncv32f *src1, const Ncv32f *ufi, const Ncv32f *vfi, const Ncv32f *ubi, const Ncv32f *vbi, const Ncv32f *o1, const Ncv32f *o2, Ncv32u width, Ncv32u height, Ncv32u stride, Ncv32f theta, Ncv32f *out) { tex_src1.addressMode[0] = 
cudaAddressModeClamp; tex_src1.addressMode[1] = cudaAddressModeClamp; tex_src1.filterMode = cudaFilterModeLinear; tex_src1.normalized = false; tex_src0.addressMode[0] = cudaAddressModeClamp; tex_src0.addressMode[1] = cudaAddressModeClamp; tex_src0.filterMode = cudaFilterModeLinear; tex_src0.normalized = false; cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> (); const Ncv32u pitch = stride * sizeof (float); ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src1, src1, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn (cudaBindTexture2D (0, tex_src0, src0, desc, width, height, pitch), NPPST_TEXTURE_BIND_ERROR); dim3 threads (32, 4); dim3 blocks (iDivUp (width, threads.x), iDivUp (height, threads.y)); BlendFramesKernel<<<blocks, threads, 0, nppStGetActiveCUDAstream ()>>> (ufi, vfi, ubi, vbi, o1, o2, width, height, stride, theta, out); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus nppiStGetInterpolationBufferSize(NcvSize32u srcSize, Ncv32u nStep, Ncv32u *hpSize) { NCVStatus status = NPPST_ERROR; status = nppiStVectorWarpGetBufferSize(srcSize, nStep, hpSize); return status; } NCVStatus nppiStInterpolateFrames(const NppStInterpolationState *pState) { // check state validity ncvAssertReturn (pState->pSrcFrame0 != 0 && pState->pSrcFrame1 != 0 && pState->pFU != 0 && pState->pFV != 0 && pState->pBU != 0 && pState->pBV != 0 && pState->pNewFrame != 0 && pState->ppBuffers[0] != 0 && pState->ppBuffers[1] != 0 && pState->ppBuffers[2] != 0 && pState->ppBuffers[3] != 0 && pState->ppBuffers[4] != 0 && pState->ppBuffers[5] != 0, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (pState->size.width > 0 && pState->size.height > 0, NPPST_ERROR); ncvAssertReturn (pState->nStep >= pState->size.width * sizeof (Ncv32f) && pState->nStep > 0 && pState->nStep % sizeof (Ncv32f) == 0, NPPST_INVALID_STEP); // change notation Ncv32f *cov0 = pState->ppBuffers[0]; Ncv32f *cov1 = pState->ppBuffers[1]; Ncv32f *fwdU = 
pState->ppBuffers[2]; // forward u Ncv32f *fwdV = pState->ppBuffers[3]; // forward v Ncv32f *bwdU = pState->ppBuffers[4]; // backward u Ncv32f *bwdV = pState->ppBuffers[5]; // backward v // warp flow ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFU, pState->size, pState->nStep, pState->pFU, pState->pFV, pState->nStep, cov0, pState->pos, fwdU) ); ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pFV, pState->size, pState->nStep, pState->pFU, pState->pFV, pState->nStep, cov0, pState->pos, fwdV) ); // warp backward flow ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBU, pState->size, pState->nStep, pState->pBU, pState->pBV, pState->nStep, cov1, 1.0f - pState->pos, bwdU) ); ncvAssertReturnNcvStat ( nppiStVectorWarp_PSF2x2_32f_C1 (pState->pBV, pState->size, pState->nStep, pState->pBU, pState->pBV, pState->nStep, cov1, 1.0f - pState->pos, bwdU) ); // interpolate frame ncvAssertReturnNcvStat ( BlendFrames (pState->pSrcFrame0, pState->pSrcFrame1, fwdU, fwdV, bwdU, bwdV, cov0, cov1, pState->size.width, pState->size.height, pState->nStep / sizeof (Ncv32f), pState->pos, pState->pNewFrame) ); return NPPST_SUCCESS; } //============================================================================== // // VectorWarpFrame.cu // //============================================================================== __global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *normalization_factor, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; //bottom left corner of a target pixel float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f; float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f; // pixel containing bottom 
left corner float px; float py; float dx = modff (cx, &px); float dy = modff (cy, &py); // target pixel integer coords int tx; int ty; tx = (int) px; ty = (int) py; float value = src[image_row_offset + j]; float weight; // fill pixel containing bottom right corner if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * dy; atomicAdd (dst + ty * image_stride + tx, value * weight); atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing bottom left corner tx -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * dy; atomicAdd (dst + ty * image_stride + tx, value * weight); atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper left corner ty -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * (1.0f - dy); atomicAdd (dst + ty * image_stride + tx, value * weight); atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper right corner tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); atomicAdd (dst + ty * image_stride + tx, value * weight); atomicAdd (normalization_factor + ty * image_stride + tx, weight); } } __global__ void ForwardWarpKernel_PSF1x1(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; float u_ = u[flow_row_offset + j]; float v_ = v[flow_row_offset + j]; //bottom left corner of target pixel float cx = u_ * time_scale + (float)j + 1.0f; float cy = v_ * time_scale + (float)i + 1.0f; // pixel containing bottom left corner int tx = __float2int_rn (cx); int ty = __float2int_rn (cy); float value = 
src[image_row_offset + j]; // fill pixel if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { atomicAdd (dst + ty * image_stride + tx, value); } } __global__ void NormalizeKernel(const float *normalization_factor, int w, int h, int s, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * s + j; float scale = normalization_factor[pos]; float invScale = (scale == 0.0f) ? 1.0f : (1.0f / scale); image[pos] *= invScale; } __global__ void MemsetKernel(const float value, int w, int h, float *image) { int i = threadIdx.y + blockDim.y * blockIdx.y; int j = threadIdx.x + blockDim.x * blockIdx.x; if (i >= h || j >= w) return; const int pos = i * w + j; image[pos] = value; } NCVStatus nppiStVectorWarpGetBufferSize (NcvSize32u srcSize, Ncv32u nSrcStep, Ncv32u *hpSize) { ncvAssertReturn (hpSize != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep, NPPST_INVALID_STEP); *hpSize = nSrcStep * srcSize.height; return NPPST_SUCCESS; } // does not require normalization NCVStatus nppiStVectorWarp_PSF1x1_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof (Ncv32f); dim3 ctaSize (32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); ForwardWarpKernel_PSF1x1 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } NCVStatus 
nppiStVectorWarp_PSF2x2_32f_C1(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, const Ncv32f *pU, const Ncv32f *pV, Ncv32u nVFStep, Ncv32f *pBuffer, Ncv32f timeScale, Ncv32f *pDst) { ncvAssertReturn (pSrc != NULL && pU != NULL && pV != NULL && pDst != NULL && pBuffer != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (srcSize.width * sizeof (Ncv32f) <= nSrcStep && srcSize.width * sizeof (Ncv32f) <= nVFStep, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u vfStep = nVFStep / sizeof(Ncv32f); dim3 ctaSize(32, 6); dim3 gridSize (iDivUp (srcSize.width, ctaSize.x), iDivUp (srcSize.height, ctaSize.y)); MemsetKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (0, srcSize.width, srcSize.height, pBuffer); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); ForwardWarpKernel_PSF2x2 <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pU, pV, pSrc, srcSize.width, srcSize.height, vfStep, srcStep, timeScale, pBuffer, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); NormalizeKernel <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream()>>> (pBuffer, srcSize.width, srcSize.height, srcStep, pDst); ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return NPPST_SUCCESS; } //============================================================================== // // Resize.cu // //============================================================================== texture <float, 2, cudaReadModeElementType> texSrc2D; __forceinline__ __device__ float processLine(int spos, float xmin, float xmax, int ixmin, int ixmax, float fxmin, float cxmax) { // first element float wsum = 1.0f - xmin + fxmin; float sum = tex1Dfetch(texSrc, spos) * (1.0f - xmin + fxmin); spos++; for (int ix = ixmin + 1; ix < ixmax; ++ix) { sum += tex1Dfetch(texSrc, spos); spos++; wsum += 1.0f; } sum += tex1Dfetch(texSrc, spos) * (cxmax - xmax); wsum += cxmax - xmax; return sum / wsum; } __global__ void resizeSuperSample_32f(NcvSize32u 
srcSize, Ncv32u srcStep, NcvRect32u srcROI, Ncv32f *dst, NcvSize32u dstSize, Ncv32u dstStep, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { // position within dst ROI const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } float rw = (float) srcROI.width; float rh = (float) srcROI.height; // source position float x = scaleX * (float) ix; float y = scaleY * (float) iy; // x sampling range float xBegin = fmax (x - scaleX, 0.0f); float xEnd = fmin (x + scaleX, rw - 1.0f); // y sampling range float yBegin = fmax (y - scaleY, 0.0f); float yEnd = fmin (y + scaleY, rh - 1.0f); // x range of source samples float floorXBegin = std::floor (xBegin); float ceilXEnd = std::ceil (xEnd); int iXBegin = srcROI.x + (int) floorXBegin; int iXEnd = srcROI.x + (int) ceilXEnd; // y range of source samples float floorYBegin = std::floor (yBegin); float ceilYEnd = std::ceil (yEnd); int iYBegin = srcROI.y + (int) floorYBegin; int iYEnd = srcROI.y + (int) ceilYEnd; // first row int pos = iYBegin * srcStep + iXBegin; float wsum = 1.0f - yBegin + floorYBegin; float sum = processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (1.0f - yBegin + floorYBegin); pos += srcStep; for (int iy = iYBegin + 1; iy < iYEnd; ++iy) { sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd); pos += srcStep; wsum += 1.0f; } sum += processLine (pos, xBegin, xEnd, iXBegin, iXEnd, floorXBegin, ceilXEnd) * (ceilYEnd - yEnd); wsum += ceilYEnd - yEnd; sum /= wsum; dst[(ix + dstROI.x) + (iy + dstROI.y) * dstStep] = sum; } // bicubic interpolation __forceinline__ __device__ float bicubicCoeff(float x_) { float x = std::abs(x_); if (x <= 1.0f) { return x * x * (1.5f * x - 2.5f) + 1.0f; } else if (x < 2.0f) { return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f; } else { return 0.0f; } } __global__ void resizeBicubic(NcvSize32u srcSize, NcvRect32u srcROI, NcvSize32u 
dstSize, Ncv32u dstStep, Ncv32f *dst, NcvRect32u dstROI, Ncv32f scaleX, Ncv32f scaleY) { const int ix = blockIdx.x * blockDim.x + threadIdx.x; const int iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= dstROI.width || iy >= dstROI.height) { return; } const float dx = 1.0f / srcROI.width; const float dy = 1.0f / srcROI.height; float rx = (float) srcROI.x; float ry = (float) srcROI.y; float rw = (float) srcROI.width; float rh = (float) srcROI.height; float x = scaleX * (float) ix; float y = scaleY * (float) iy; // sampling range // border mode is clamp float xmin = fmax (std::ceil (x - 2.0f), 0.0f); float xmax = fmin (std::floor (x + 2.0f), rw - 1.0f); float ymin = fmax (std::ceil (y - 2.0f), 0.0f); float ymax = fmin (std::floor (y + 2.0f), rh - 1.0f); // shift data window to match ROI rx += 0.5f; ry += 0.5f; x += rx; y += ry; xmin += rx; xmax += rx; ymin += ry; ymax += ry; float sum = 0.0f; float wsum = 0.0f; for (float cy = ymin; cy <= ymax; cy += 1.0f) { for (float cx = xmin; cx <= xmax; cx += 1.0f) { float xDist = x - cx; float yDist = y - cy; float wx = bicubicCoeff (xDist); float wy = bicubicCoeff (yDist); wx *= wy; sum += wx * tex2D (texSrc2D, cx * dx, cy * dy); wsum += wx; } } dst[(ix + dstROI.x)+ (iy + dstROI.y) * dstStep] = (!wsum)? 
0 : sum / wsum; } NCVStatus nppiStResize_32f_C1R(const Ncv32f *pSrc, NcvSize32u srcSize, Ncv32u nSrcStep, NcvRect32u srcROI, Ncv32f *pDst, NcvSize32u dstSize, Ncv32u nDstStep, NcvRect32u dstROI, Ncv32f xFactor, Ncv32f yFactor, NppStInterpMode interpolation) { NCVStatus status = NPPST_SUCCESS; ncvAssertReturn (pSrc != NULL && pDst != NULL, NPPST_NULL_POINTER_ERROR); ncvAssertReturn (xFactor != 0.0 && yFactor != 0.0, NPPST_INVALID_SCALE); ncvAssertReturn (nSrcStep >= sizeof (Ncv32f) * (Ncv32u) srcSize.width && nDstStep >= sizeof (Ncv32f) * (Ncv32f) dstSize.width, NPPST_INVALID_STEP); Ncv32u srcStep = nSrcStep / sizeof (Ncv32f); Ncv32u dstStep = nDstStep / sizeof (Ncv32f); // TODO: preprocess ROI to prevent out of bounds access if (interpolation == nppStSupersample) { // bind texture cudaBindTexture (0, texSrc, pSrc, srcSize.height * nSrcStep); // invoke kernel dim3 ctaSize (32, 6); dim3 gridSize ((dstROI.width + ctaSize.x - 1) / ctaSize.x, (dstROI.height + ctaSize.y - 1) / ctaSize.y); resizeSuperSample_32f <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcSize, srcStep, srcROI, pDst, dstSize, dstStep, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else if (interpolation == nppStBicubic) { texSrc2D.addressMode[0] = cudaAddressModeMirror; texSrc2D.addressMode[1] = cudaAddressModeMirror; texSrc2D.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc <float> (); cudaBindTexture2D (0, texSrc2D, pSrc, desc, srcSize.width, srcSize.height, nSrcStep); dim3 ctaSize (32, 6); dim3 gridSize ((dstSize.width + ctaSize.x - 1) / ctaSize.x, (dstSize.height + ctaSize.y - 1) / ctaSize.y); resizeBicubic <<<gridSize, ctaSize, 0, nppStGetActiveCUDAstream ()>>> (srcSize, srcROI, dstSize, dstStep, pDst, dstROI, 1.0f / xFactor, 1.0f / yFactor); } else { status = NPPST_ERROR; } ncvAssertCUDALastErrorReturn(NPPST_CUDA_KERNEL_EXECUTION_ERROR); return status; }
1bb8b19759edcbbfdfb36533346df871ec32988f.hip
// !!! This is a file automatically generated by hipify!!! // Excercise 0 - Device Query #include <wb.h> //@@ The purpose of this code is to become familiar with the submission //@@ process. Do not worry if you do not understand all the details of //@@ the code. int main(int argc, char ** argv) { int deviceCount; wbArg_read(argc, argv); hipGetDeviceCount(&deviceCount); wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer for (int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { wbLog(TRACE, "No CUDA GPU has been detected"); return -1; } else if (deviceCount == 1) { //@@ WbLog is a provided logging API (similar to Log4J). //@@ The logging function wbLog takes a level which is either //@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a //@@ message to be printed. wbLog(TRACE, "There is 1 device supporting CUDA"); } else { wbLog(TRACE, "There are ", deviceCount, " devices supporting CUDA"); } } wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name); wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".", deviceProp.minor); wbLog(TRACE, " Maximum global memory size: ", deviceProp.totalGlobalMem); wbLog(TRACE, " Maximum constant memory size: ", deviceProp.totalConstMem); wbLog(TRACE, " Maximum shared memory size per block: ", deviceProp.sharedMemPerBlock); wbLog(TRACE, " Maximum threads per block: ", deviceProp.maxThreadsPerBlock); wbLog(TRACE, " Maximum block dimensions: ", deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1], " x ", deviceProp.maxThreadsDim[2]); wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0], " x ", deviceProp.maxGridSize[1], " x ", deviceProp.maxGridSize[2]); wbLog(TRACE, " Warp size: ", deviceProp.warpSize); } wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer return 0; }
1bb8b19759edcbbfdfb36533346df871ec32988f.cu
// Excercise 0 - Device Query #include <wb.h> //@@ The purpose of this code is to become familiar with the submission //@@ process. Do not worry if you do not understand all the details of //@@ the code. int main(int argc, char ** argv) { int deviceCount; wbArg_read(argc, argv); cudaGetDeviceCount(&deviceCount); wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer for (int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { wbLog(TRACE, "No CUDA GPU has been detected"); return -1; } else if (deviceCount == 1) { //@@ WbLog is a provided logging API (similar to Log4J). //@@ The logging function wbLog takes a level which is either //@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a //@@ message to be printed. wbLog(TRACE, "There is 1 device supporting CUDA"); } else { wbLog(TRACE, "There are ", deviceCount, " devices supporting CUDA"); } } wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name); wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".", deviceProp.minor); wbLog(TRACE, " Maximum global memory size: ", deviceProp.totalGlobalMem); wbLog(TRACE, " Maximum constant memory size: ", deviceProp.totalConstMem); wbLog(TRACE, " Maximum shared memory size per block: ", deviceProp.sharedMemPerBlock); wbLog(TRACE, " Maximum threads per block: ", deviceProp.maxThreadsPerBlock); wbLog(TRACE, " Maximum block dimensions: ", deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1], " x ", deviceProp.maxThreadsDim[2]); wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0], " x ", deviceProp.maxGridSize[1], " x ", deviceProp.maxGridSize[2]); wbLog(TRACE, " Warp size: ", deviceProp.warpSize); } wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer return 0; }
b01d2b901639fe1b7e110f3f18a1294844862c10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void TgvUpdateDualVariablesTGVKernel(float* u_, float2 *v_, float alpha0, float alpha1, float sigma, float eta_p, float eta_q, float* a, float* b, float*c, float4* grad_v, float2* p, float4* q, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column float desiredRadius = (float)width / 2.20f; float halfWidth = (float)width / 2.0f; float halfHeight = (float)height / 2.0f; float radius = sqrtf((iy - halfHeight) * (iy - halfHeight) + (ix - halfWidth) * (ix - halfWidth)); if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; if (radius >= desiredRadius) { p[pos] = make_float2(0.0f, 0.0f); q[pos] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); } else { int right = (ix + 1) + iy * stride; int down = ix + (iy + 1) * stride; int left = (ix - 1) + iy * stride; int up = ix + (iy - 1) * stride; //u_x = dxp(u_) - v_(:, : , 1); float u_x, u_y; if ((ix + 1) < width) u_x = u_[right] - u_[pos] - v_[pos].x; else u_x = u_[pos] - u_[left] - v_[pos].x; //u_y = dyp(u_) - v_(:, : , 2); if ((iy + 1) < height) u_y = u_[down] - u_[pos] - v_[pos].y; else u_y = u_[pos] - u_[up] - v_[pos].y; //du_tensor_x = a.*u_x + c.*u_y; float du_tensor_x = a[pos] * u_x + c[pos] * u_y; //du_tensor_y = c.*u_x + b.*u_y; float du_tensor_y = c[pos] * u_x + b[pos] * u_y; //p(:, : , 1) = p(:, : , 1) + alpha1*sigma / eta_p.*du_tensor_x; p[pos].x = p[pos].x + (alpha1*sigma / eta_p) * du_tensor_x; //p(:, : , 2) = p(:, : , 2) + alpha1*sigma / eta_p.*du_tensor_y; p[pos].y = p[pos].y + (alpha1*sigma / eta_p) * du_tensor_y; //projection //reprojection = max(1.0, sqrt(p(:, : , 1). ^ 2 + p(:, : , 2). ^ 2)); float reprojection = sqrtf(p[pos].x * p[pos].x + p[pos].y * p[pos].y); if (reprojection < 1.0f) { reprojection = 1.0f; } //p(:, : , 1) = p(:, : , 1). 
/ reprojection; p[pos].x = p[pos].x / reprojection; //p(:, : , 2) = p(:, : , 2). / reprojection; p[pos].y = p[pos].y / reprojection; //grad_v(:, : , 1) = dxp(v_(:, : , 1)); if ((ix + 1) < width) grad_v[pos].x = v_[right].x - v_[pos].x; else grad_v[pos].x = v_[pos].x - v_[left].x; //grad_v(:, : , 2) = dyp(v_(:, : , 2)); if ((iy + 1) < height) grad_v[pos].y = v_[down].y - v_[pos].y; else grad_v[pos].y = v_[pos].y - v_[up].y; //grad_v(:, : , 3) = dyp(v_(:, : , 1)); if ((iy + 1) < height) grad_v[pos].z = v_[down].x - v_[pos].x; else grad_v[pos].z = v_[pos].x - v_[up].x; //grad_v(:, : , 4) = dxp(v_(:, : , 2)); if ((ix + 1) < width) grad_v[pos].w = v_[right].y - v_[pos].y; else grad_v[pos].w = v_[pos].y - v_[left].y; //q = q + alpha0*sigma / eta_q.*grad_v; float ase = alpha0 * sigma / eta_q; float4 qpos; qpos.x = q[pos].x + ase * grad_v[pos].x; qpos.y = q[pos].y + ase * grad_v[pos].y; qpos.z = q[pos].z + ase * grad_v[pos].z; qpos.w = q[pos].w + ase * grad_v[pos].w; //reproject = max(1.0, sqrt(q(:, : , 1). ^ 2 + q(:, : , 2). ^ 2 + q(:, : , 3). ^ 2 + q(:, : , 4). ^ 2)); float reproject = sqrtf(qpos.x * qpos.x + qpos.y * qpos.y + qpos.z * qpos.z + qpos.w * qpos.w); if (reproject < 1.0f) { reproject = 1.0f; } //q(:, : , 1) = q(:, : , 1). / reproject; q[pos].x = qpos.x / reproject; //q(:, : , 2) = q(:, : , 2). / reproject; q[pos].y = qpos.y / reproject; //q(:, : , 3) = q(:, : , 3). / reproject; q[pos].z = qpos.z / reproject; //q(:, : , 4) = q(:, : , 4). / reproject; q[pos].w = qpos.w / reproject; } } }
b01d2b901639fe1b7e110f3f18a1294844862c10.cu
#include "includes.h" __global__ void TgvUpdateDualVariablesTGVKernel(float* u_, float2 *v_, float alpha0, float alpha1, float sigma, float eta_p, float eta_q, float* a, float* b, float*c, float4* grad_v, float2* p, float4* q, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column float desiredRadius = (float)width / 2.20f; float halfWidth = (float)width / 2.0f; float halfHeight = (float)height / 2.0f; float radius = sqrtf((iy - halfHeight) * (iy - halfHeight) + (ix - halfWidth) * (ix - halfWidth)); if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; if (radius >= desiredRadius) { p[pos] = make_float2(0.0f, 0.0f); q[pos] = make_float4(0.0f, 0.0f, 0.0f, 0.0f); } else { int right = (ix + 1) + iy * stride; int down = ix + (iy + 1) * stride; int left = (ix - 1) + iy * stride; int up = ix + (iy - 1) * stride; //u_x = dxp(u_) - v_(:, : , 1); float u_x, u_y; if ((ix + 1) < width) u_x = u_[right] - u_[pos] - v_[pos].x; else u_x = u_[pos] - u_[left] - v_[pos].x; //u_y = dyp(u_) - v_(:, : , 2); if ((iy + 1) < height) u_y = u_[down] - u_[pos] - v_[pos].y; else u_y = u_[pos] - u_[up] - v_[pos].y; //du_tensor_x = a.*u_x + c.*u_y; float du_tensor_x = a[pos] * u_x + c[pos] * u_y; //du_tensor_y = c.*u_x + b.*u_y; float du_tensor_y = c[pos] * u_x + b[pos] * u_y; //p(:, : , 1) = p(:, : , 1) + alpha1*sigma / eta_p.*du_tensor_x; p[pos].x = p[pos].x + (alpha1*sigma / eta_p) * du_tensor_x; //p(:, : , 2) = p(:, : , 2) + alpha1*sigma / eta_p.*du_tensor_y; p[pos].y = p[pos].y + (alpha1*sigma / eta_p) * du_tensor_y; //projection //reprojection = max(1.0, sqrt(p(:, : , 1). ^ 2 + p(:, : , 2). ^ 2)); float reprojection = sqrtf(p[pos].x * p[pos].x + p[pos].y * p[pos].y); if (reprojection < 1.0f) { reprojection = 1.0f; } //p(:, : , 1) = p(:, : , 1). / reprojection; p[pos].x = p[pos].x / reprojection; //p(:, : , 2) = p(:, : , 2). 
/ reprojection; p[pos].y = p[pos].y / reprojection; //grad_v(:, : , 1) = dxp(v_(:, : , 1)); if ((ix + 1) < width) grad_v[pos].x = v_[right].x - v_[pos].x; else grad_v[pos].x = v_[pos].x - v_[left].x; //grad_v(:, : , 2) = dyp(v_(:, : , 2)); if ((iy + 1) < height) grad_v[pos].y = v_[down].y - v_[pos].y; else grad_v[pos].y = v_[pos].y - v_[up].y; //grad_v(:, : , 3) = dyp(v_(:, : , 1)); if ((iy + 1) < height) grad_v[pos].z = v_[down].x - v_[pos].x; else grad_v[pos].z = v_[pos].x - v_[up].x; //grad_v(:, : , 4) = dxp(v_(:, : , 2)); if ((ix + 1) < width) grad_v[pos].w = v_[right].y - v_[pos].y; else grad_v[pos].w = v_[pos].y - v_[left].y; //q = q + alpha0*sigma / eta_q.*grad_v; float ase = alpha0 * sigma / eta_q; float4 qpos; qpos.x = q[pos].x + ase * grad_v[pos].x; qpos.y = q[pos].y + ase * grad_v[pos].y; qpos.z = q[pos].z + ase * grad_v[pos].z; qpos.w = q[pos].w + ase * grad_v[pos].w; //reproject = max(1.0, sqrt(q(:, : , 1). ^ 2 + q(:, : , 2). ^ 2 + q(:, : , 3). ^ 2 + q(:, : , 4). ^ 2)); float reproject = sqrtf(qpos.x * qpos.x + qpos.y * qpos.y + qpos.z * qpos.z + qpos.w * qpos.w); if (reproject < 1.0f) { reproject = 1.0f; } //q(:, : , 1) = q(:, : , 1). / reproject; q[pos].x = qpos.x / reproject; //q(:, : , 2) = q(:, : , 2). / reproject; q[pos].y = qpos.y / reproject; //q(:, : , 3) = q(:, : , 3). / reproject; q[pos].z = qpos.z / reproject; //q(:, : , 4) = q(:, : , 4). / reproject; q[pos].w = qpos.w / reproject; } } }
90638ba87256fd666e5023608602b11c0683420a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/emulation.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/utility.hpp" #include "opencv2/core/cuda.hpp" using namespace cv::cuda; using namespace cv::cuda::device; namespace canny { struct L1 : binary_function<int, int, float> { __device__ __forceinline__ float operator ()(int x, int y) const { return ::abs(x) + ::abs(y); } __host__ __device__ __forceinline__ L1() {} __host__ __device__ __forceinline__ L1(const L1&) {} }; struct L2 : binary_function<int, int, float> { __device__ __forceinline__ float operator ()(int x, int y) const { return ::sqrtf(x * x + y * y); } __host__ __device__ __forceinline__ L2() {} __host__ __device__ __forceinline__ L2(const L2&) {} }; } namespace cv { namespace cuda { namespace device { template <> struct TransformFunctorTraits<canny::L1> : DefaultTransformFunctorTraits<canny::L1> { enum { smart_shift = 4 }; }; template <> struct TransformFunctorTraits<canny::L2> : DefaultTransformFunctorTraits<canny::L2> { enum { smart_shift = 4 }; }; }}} namespace canny { struct SrcTex { virtual ~SrcTex() {} __host__ SrcTex(int _xoff, int _yoff) : xoff(_xoff), yoff(_yoff) {} __device__ __forceinline__ virtual int operator ()(int y, int x) const = 0; int xoff; int yoff; }; texture<uchar, hipTextureType2D, hipReadModeElementType> tex_src(false, 
hipFilterModePoint, hipAddressModeClamp); struct SrcTexRef : SrcTex { __host__ SrcTexRef(int _xoff, int _yoff) : SrcTex(_xoff, _yoff) {} __device__ __forceinline__ int operator ()(int y, int x) const override { return tex2D(tex_src, x + xoff, y + yoff); } }; struct SrcTexObj : SrcTex { __host__ SrcTexObj(int _xoff, int _yoff, hipTextureObject_t _tex_src_object) : SrcTex(_xoff, _yoff), tex_src_object(_tex_src_object) { } __device__ __forceinline__ int operator ()(int y, int x) const override { return tex2D<uchar>(tex_src_object, x + xoff, y + yoff); } hipTextureObject_t tex_src_object; }; template < class T, class Norm, typename = typename std::enable_if<std::is_base_of<SrcTex, T>::value>::type > __global__ void calcMagnitudeKernel(const T src, PtrStepi dx, PtrStepi dy, PtrStepSzf mag, const Norm norm) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= mag.rows || x >= mag.cols) return; int dxVal = (src(y - 1, x + 1) + 2 * src(y, x + 1) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y, x - 1) + src(y + 1, x - 1)); int dyVal = (src(y + 1, x - 1) + 2 * src(y + 1, x) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y - 1, x) + src(y - 1, x + 1)); dx(y, x) = dxVal; dy(y, x) = dyVal; mag(y, x) = norm(dxVal, dyVal); } void calcMagnitude(PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad, hipStream_t stream) { const dim3 block(16, 16); const dim3 grid(divUp(mag.cols, block.x), divUp(mag.rows, block.y)); bool cc30 = deviceSupports(FEATURE_SET_COMPUTE_30); if (cc30) { hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.addressMode[1] = hipAddressModeClamp; texDesc.addressMode[2] = hipAddressModeClamp; hipTextureObject_t tex = 0; createTextureObjectPitch2D(&tex, srcWhole, texDesc); SrcTexObj src(xoff, yoff, tex); if (L2Grad) { L2 norm; hipLaunchKernelGGL(( calcMagnitudeKernel), dim3(grid), 
dim3(block), 0, stream, src, dx, dy, mag, norm); } else { L1 norm; hipLaunchKernelGGL(( calcMagnitudeKernel), dim3(grid), dim3(block), 0, stream, src, dx, dy, mag, norm); } cudaSafeCall( hipGetLastError() ); if (stream == NULL) cudaSafeCall( hipDeviceSynchronize() ); else cudaSafeCall( hipStreamSynchronize(stream) ); cudaSafeCall( hipDestroyTextureObject(tex) ); } else { bindTexture(&tex_src, srcWhole); SrcTexRef src(xoff, yoff); if (L2Grad) { L2 norm; hipLaunchKernelGGL(( calcMagnitudeKernel), dim3(grid), dim3(block), 0, stream, src, dx, dy, mag, norm); } else { L1 norm; hipLaunchKernelGGL(( calcMagnitudeKernel), dim3(grid), dim3(block), 0, stream, src, dx, dy, mag, norm); } cudaSafeCall( hipGetLastError() ); if (stream == NULL) cudaSafeCall( hipDeviceSynchronize() ); } } void calcMagnitude(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad, hipStream_t stream) { if (L2Grad) { L2 norm; transform(dx, dy, mag, norm, WithOutMask(), stream); } else { L1 norm; transform(dx, dy, mag, norm, WithOutMask(), stream); } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { texture<float, hipTextureType2D, hipReadModeElementType> tex_mag(false, hipFilterModePoint, hipAddressModeClamp); __global__ void calcMapKernel(const PtrStepSzi dx, const PtrStepi dy, PtrStepi map, const float low_thresh, const float high_thresh) { const int CANNY_SHIFT = 15; const int TG22 = (int)(0.4142135623730950488016887242097*(1<<CANNY_SHIFT) + 0.5); const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x == 0 || x >= dx.cols - 1 || y == 0 || y >= dx.rows - 1) return; int dxVal = dx(y, x); int dyVal = dy(y, x); const int s = (dxVal ^ dyVal) < 0 ? 
-1 : 1; const float m = tex2D(tex_mag, x, y); dxVal = ::abs(dxVal); dyVal = ::abs(dyVal); // 0 - the pixel can not belong to an edge // 1 - the pixel might belong to an edge // 2 - the pixel does belong to an edge int edge_type = 0; if (m > low_thresh) { const int tg22x = dxVal * TG22; const int tg67x = tg22x + ((dxVal + dxVal) << CANNY_SHIFT); dyVal <<= CANNY_SHIFT; if (dyVal < tg22x) { if (m > tex2D(tex_mag, x - 1, y) && m >= tex2D(tex_mag, x + 1, y)) edge_type = 1 + (int)(m > high_thresh); } else if(dyVal > tg67x) { if (m > tex2D(tex_mag, x, y - 1) && m >= tex2D(tex_mag, x, y + 1)) edge_type = 1 + (int)(m > high_thresh); } else { if (m > tex2D(tex_mag, x - s, y - 1) && m >= tex2D(tex_mag, x + s, y + 1)) edge_type = 1 + (int)(m > high_thresh); } } map(y, x) = edge_type; } __global__ void calcMapKernel(const PtrStepSzi dx, const PtrStepi dy, PtrStepi map, const float low_thresh, const float high_thresh, hipTextureObject_t tex_mag) { const int CANNY_SHIFT = 15; const int TG22 = (int)(0.4142135623730950488016887242097*(1<<CANNY_SHIFT) + 0.5); const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x == 0 || x >= dx.cols - 1 || y == 0 || y >= dx.rows - 1) return; int dxVal = dx(y, x); int dyVal = dy(y, x); const int s = (dxVal ^ dyVal) < 0 ? 
-1 : 1; const float m = tex2D<float>(tex_mag, x, y); dxVal = ::abs(dxVal); dyVal = ::abs(dyVal); // 0 - the pixel can not belong to an edge // 1 - the pixel might belong to an edge // 2 - the pixel does belong to an edge int edge_type = 0; if (m > low_thresh) { const int tg22x = dxVal * TG22; const int tg67x = tg22x + ((dxVal + dxVal) << CANNY_SHIFT); dyVal <<= CANNY_SHIFT; if (dyVal < tg22x) { if (m > tex2D<float>(tex_mag, x - 1, y) && m >= tex2D<float>(tex_mag, x + 1, y)) edge_type = 1 + (int)(m > high_thresh); } else if(dyVal > tg67x) { if (m > tex2D<float>(tex_mag, x, y - 1) && m >= tex2D<float>(tex_mag, x, y + 1)) edge_type = 1 + (int)(m > high_thresh); } else { if (m > tex2D<float>(tex_mag, x - s, y - 1) && m >= tex2D<float>(tex_mag, x + s, y + 1)) edge_type = 1 + (int)(m > high_thresh); } } map(y, x) = edge_type; } void calcMap(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, PtrStepSzi map, float low_thresh, float high_thresh, hipStream_t stream) { const dim3 block(16, 16); const dim3 grid(divUp(dx.cols, block.x), divUp(dx.rows, block.y)); if (deviceSupports(FEATURE_SET_COMPUTE_30)) { // Use the texture object hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypePitch2D; resDesc.res.pitch2D.devPtr = mag.ptr(); resDesc.res.pitch2D.height = mag.rows; resDesc.res.pitch2D.width = mag.cols; resDesc.res.pitch2D.pitchInBytes = mag.step; resDesc.res.pitch2D.desc = hipCreateChannelDesc<float>(); hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.addressMode[1] = hipAddressModeClamp; texDesc.addressMode[2] = hipAddressModeClamp; hipTextureObject_t tex=0; hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL); hipLaunchKernelGGL(( calcMapKernel), dim3(grid), dim3(block), 0, stream, dx, dy, map, low_thresh, high_thresh, tex); cudaSafeCall( hipGetLastError() ); if (stream == NULL) cudaSafeCall( hipDeviceSynchronize() ); else cudaSafeCall( hipStreamSynchronize(stream) 
); cudaSafeCall( hipDestroyTextureObject(tex) ); } else { // Use the texture reference bindTexture(&tex_mag, mag); hipLaunchKernelGGL(( calcMapKernel), dim3(grid), dim3(block), 0, stream, dx, dy, map, low_thresh, high_thresh); cudaSafeCall( hipGetLastError() ); if (stream == NULL) cudaSafeCall( hipDeviceSynchronize() ); } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { __device__ __forceinline__ bool checkIdx(int y, int x, int rows, int cols) { return (y >= 0) && (y < rows) && (x >= 0) && (x < cols); } __global__ void edgesHysteresisLocalKernel(PtrStepSzi map, short2* st, int* d_counter) { __shared__ volatile int smem[18][18]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; smem[threadIdx.y + 1][threadIdx.x + 1] = checkIdx(y, x, map.rows, map.cols) ? map(y, x) : 0; if (threadIdx.y == 0) smem[0][threadIdx.x + 1] = checkIdx(y - 1, x, map.rows, map.cols) ? map(y - 1, x) : 0; if (threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][threadIdx.x + 1] = checkIdx(y + 1, x, map.rows, map.cols) ? map(y + 1, x) : 0; if (threadIdx.x == 0) smem[threadIdx.y + 1][0] = checkIdx(y, x - 1, map.rows, map.cols) ? map(y, x - 1) : 0; if (threadIdx.x == blockDim.x - 1) smem[threadIdx.y + 1][blockDim.x + 1] = checkIdx(y, x + 1, map.rows, map.cols) ? map(y, x + 1) : 0; if (threadIdx.x == 0 && threadIdx.y == 0) smem[0][0] = checkIdx(y - 1, x - 1, map.rows, map.cols) ? map(y - 1, x - 1) : 0; if (threadIdx.x == blockDim.x - 1 && threadIdx.y == 0) smem[0][blockDim.x + 1] = checkIdx(y - 1, x + 1, map.rows, map.cols) ? map(y - 1, x + 1) : 0; if (threadIdx.x == 0 && threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][0] = checkIdx(y + 1, x - 1, map.rows, map.cols) ? map(y + 1, x - 1) : 0; if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][blockDim.x + 1] = checkIdx(y + 1, x + 1, map.rows, map.cols) ? 
map(y + 1, x + 1) : 0; __syncthreads(); if (x >= map.cols || y >= map.rows) return; int n; #pragma unroll for (int k = 0; k < 16; ++k) { n = 0; if (smem[threadIdx.y + 1][threadIdx.x + 1] == 1) { n += smem[threadIdx.y ][threadIdx.x ] == 2; n += smem[threadIdx.y ][threadIdx.x + 1] == 2; n += smem[threadIdx.y ][threadIdx.x + 2] == 2; n += smem[threadIdx.y + 1][threadIdx.x ] == 2; n += smem[threadIdx.y + 1][threadIdx.x + 2] == 2; n += smem[threadIdx.y + 2][threadIdx.x ] == 2; n += smem[threadIdx.y + 2][threadIdx.x + 1] == 2; n += smem[threadIdx.y + 2][threadIdx.x + 2] == 2; } __syncthreads(); if (n > 0) smem[threadIdx.y + 1][threadIdx.x + 1] = 2; __syncthreads(); } const int e = smem[threadIdx.y + 1][threadIdx.x + 1]; map(y, x) = e; n = 0; if (e == 2) { n += smem[threadIdx.y ][threadIdx.x ] == 1; n += smem[threadIdx.y ][threadIdx.x + 1] == 1; n += smem[threadIdx.y ][threadIdx.x + 2] == 1; n += smem[threadIdx.y + 1][threadIdx.x ] == 1; n += smem[threadIdx.y + 1][threadIdx.x + 2] == 1; n += smem[threadIdx.y + 2][threadIdx.x ] == 1; n += smem[threadIdx.y + 2][threadIdx.x + 1] == 1; n += smem[threadIdx.y + 2][threadIdx.x + 2] == 1; } if (n > 0) { const int ind = ::atomicAdd(d_counter, 1); st[ind] = make_short2(x, y); } } void edgesHysteresisLocal(PtrStepSzi map, short2* st1, int* d_counter, hipStream_t stream) { cudaSafeCall( hipMemsetAsync(d_counter, 0, sizeof(int), stream) ); const dim3 block(16, 16); const dim3 grid(divUp(map.cols, block.x), divUp(map.rows, block.y)); hipLaunchKernelGGL(( edgesHysteresisLocalKernel), dim3(grid), dim3(block), 0, stream, map, st1, d_counter); cudaSafeCall( hipGetLastError() ); if (stream == NULL) cudaSafeCall( hipDeviceSynchronize() ); } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { __constant__ int c_dx[8] = {-1, 0, 1, -1, 1, -1, 0, 1}; __constant__ int c_dy[8] = {-1, -1, -1, 0, 0, 1, 1, 1}; __global__ void edgesHysteresisGlobalKernel(PtrStepSzi map, short2* st1, short2* 
st2, int* d_counter, const int count) { const int stack_size = 512; __shared__ int s_counter; __shared__ int s_ind; __shared__ short2 s_st[stack_size]; if (threadIdx.x == 0) s_counter = 0; __syncthreads(); int ind = blockIdx.y * gridDim.x + blockIdx.x; if (ind >= count) return; short2 pos = st1[ind]; if (threadIdx.x < 8) { pos.x += c_dx[threadIdx.x]; pos.y += c_dy[threadIdx.x]; if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1) { map(pos.y, pos.x) = 2; ind = Emulation::smem::atomicAdd(&s_counter, 1); s_st[ind] = pos; } } __syncthreads(); while (s_counter > 0 && s_counter <= stack_size - blockDim.x) { const int subTaskIdx = threadIdx.x >> 3; const int portion = ::min(s_counter, blockDim.x >> 3); if (subTaskIdx < portion) pos = s_st[s_counter - 1 - subTaskIdx]; __syncthreads(); if (threadIdx.x == 0) s_counter -= portion; __syncthreads(); if (subTaskIdx < portion) { pos.x += c_dx[threadIdx.x & 7]; pos.y += c_dy[threadIdx.x & 7]; if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1) { map(pos.y, pos.x) = 2; ind = Emulation::smem::atomicAdd(&s_counter, 1); s_st[ind] = pos; } } __syncthreads(); } if (s_counter > 0) { if (threadIdx.x == 0) { s_ind = ::atomicAdd(d_counter, s_counter); if (s_ind + s_counter > map.cols * map.rows) s_counter = 0; } __syncthreads(); ind = s_ind; for (int i = threadIdx.x; i < s_counter; i += blockDim.x) st2[ind + i] = s_st[i]; } } void edgesHysteresisGlobal(PtrStepSzi map, short2* st1, short2* st2, int* d_counter, hipStream_t stream) { int count; cudaSafeCall( hipMemcpyAsync(&count, d_counter, sizeof(int), hipMemcpyDeviceToHost, stream) ); cudaSafeCall( hipStreamSynchronize(stream) ); while (count > 0) { cudaSafeCall( hipMemsetAsync(d_counter, 0, sizeof(int), stream) ); const dim3 block(128); const dim3 grid(::min(count, 65535u), divUp(count, 65535), 1); hipLaunchKernelGGL(( edgesHysteresisGlobalKernel), dim3(grid), dim3(block), 0, stream, map, 
st1, st2, d_counter, count); cudaSafeCall( hipGetLastError() ); if (stream == NULL) cudaSafeCall( hipDeviceSynchronize() ); cudaSafeCall( hipMemcpyAsync(&count, d_counter, sizeof(int), hipMemcpyDeviceToHost, stream) ); cudaSafeCall( hipStreamSynchronize(stream) ); count = min(count, map.cols * map.rows); //std::swap(st1, st2); short2* tmp = st1; st1 = st2; st2 = tmp; } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { struct GetEdges : unary_function<int, uchar> { __device__ __forceinline__ uchar operator ()(int e) const { return (uchar)(-(e >> 1)); } __host__ __device__ __forceinline__ GetEdges() {} __host__ __device__ __forceinline__ GetEdges(const GetEdges&) {} }; } namespace cv { namespace cuda { namespace device { template <> struct TransformFunctorTraits<canny::GetEdges> : DefaultTransformFunctorTraits<canny::GetEdges> { enum { smart_shift = 4 }; }; }}} namespace canny { void getEdges(PtrStepSzi map, PtrStepSzb dst, hipStream_t stream) { transform(map, dst, GetEdges(), WithOutMask(), stream); } } #endif /* CUDA_DISABLER */
90638ba87256fd666e5023608602b11c0683420a.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/emulation.hpp" #include "opencv2/core/cuda/transform.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/utility.hpp" #include "opencv2/core/cuda.hpp" using namespace cv::cuda; using namespace cv::cuda::device; namespace canny { struct L1 : binary_function<int, int, float> { __device__ __forceinline__ float operator ()(int x, int y) const { return ::abs(x) + ::abs(y); } __host__ __device__ __forceinline__ L1() {} __host__ __device__ __forceinline__ L1(const L1&) {} }; struct L2 : binary_function<int, int, float> { __device__ __forceinline__ float operator ()(int x, int y) const { return ::sqrtf(x * x + y * y); } __host__ __device__ __forceinline__ L2() {} __host__ __device__ __forceinline__ L2(const L2&) {} }; } namespace cv { namespace cuda { namespace device { template <> struct TransformFunctorTraits<canny::L1> : DefaultTransformFunctorTraits<canny::L1> { enum { smart_shift = 4 }; }; template <> struct TransformFunctorTraits<canny::L2> : DefaultTransformFunctorTraits<canny::L2> { enum { smart_shift = 4 }; }; }}} namespace canny { struct SrcTex { virtual ~SrcTex() {} __host__ SrcTex(int _xoff, int _yoff) : xoff(_xoff), yoff(_yoff) {} __device__ __forceinline__ virtual int operator ()(int y, int x) const = 0; int xoff; int yoff; }; texture<uchar, cudaTextureType2D, cudaReadModeElementType> tex_src(false, 
cudaFilterModePoint, cudaAddressModeClamp); struct SrcTexRef : SrcTex { __host__ SrcTexRef(int _xoff, int _yoff) : SrcTex(_xoff, _yoff) {} __device__ __forceinline__ int operator ()(int y, int x) const override { return tex2D(tex_src, x + xoff, y + yoff); } }; struct SrcTexObj : SrcTex { __host__ SrcTexObj(int _xoff, int _yoff, cudaTextureObject_t _tex_src_object) : SrcTex(_xoff, _yoff), tex_src_object(_tex_src_object) { } __device__ __forceinline__ int operator ()(int y, int x) const override { return tex2D<uchar>(tex_src_object, x + xoff, y + yoff); } cudaTextureObject_t tex_src_object; }; template < class T, class Norm, typename = typename std::enable_if<std::is_base_of<SrcTex, T>::value>::type > __global__ void calcMagnitudeKernel(const T src, PtrStepi dx, PtrStepi dy, PtrStepSzf mag, const Norm norm) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y >= mag.rows || x >= mag.cols) return; int dxVal = (src(y - 1, x + 1) + 2 * src(y, x + 1) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y, x - 1) + src(y + 1, x - 1)); int dyVal = (src(y + 1, x - 1) + 2 * src(y + 1, x) + src(y + 1, x + 1)) - (src(y - 1, x - 1) + 2 * src(y - 1, x) + src(y - 1, x + 1)); dx(y, x) = dxVal; dy(y, x) = dyVal; mag(y, x) = norm(dxVal, dyVal); } void calcMagnitude(PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad, cudaStream_t stream) { const dim3 block(16, 16); const dim3 grid(divUp(mag.cols, block.x), divUp(mag.rows, block.y)); bool cc30 = deviceSupports(FEATURE_SET_COMPUTE_30); if (cc30) { cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.addressMode[2] = cudaAddressModeClamp; cudaTextureObject_t tex = 0; createTextureObjectPitch2D(&tex, srcWhole, texDesc); SrcTexObj src(xoff, yoff, tex); if (L2Grad) { L2 norm; calcMagnitudeKernel<<<grid, block, 0, 
stream>>>(src, dx, dy, mag, norm); } else { L1 norm; calcMagnitudeKernel<<<grid, block, 0, stream>>>(src, dx, dy, mag, norm); } cudaSafeCall( cudaGetLastError() ); if (stream == NULL) cudaSafeCall( cudaDeviceSynchronize() ); else cudaSafeCall( cudaStreamSynchronize(stream) ); cudaSafeCall( cudaDestroyTextureObject(tex) ); } else { bindTexture(&tex_src, srcWhole); SrcTexRef src(xoff, yoff); if (L2Grad) { L2 norm; calcMagnitudeKernel<<<grid, block, 0, stream>>>(src, dx, dy, mag, norm); } else { L1 norm; calcMagnitudeKernel<<<grid, block, 0, stream>>>(src, dx, dy, mag, norm); } cudaSafeCall( cudaGetLastError() ); if (stream == NULL) cudaSafeCall( cudaDeviceSynchronize() ); } } void calcMagnitude(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, bool L2Grad, cudaStream_t stream) { if (L2Grad) { L2 norm; transform(dx, dy, mag, norm, WithOutMask(), stream); } else { L1 norm; transform(dx, dy, mag, norm, WithOutMask(), stream); } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { texture<float, cudaTextureType2D, cudaReadModeElementType> tex_mag(false, cudaFilterModePoint, cudaAddressModeClamp); __global__ void calcMapKernel(const PtrStepSzi dx, const PtrStepi dy, PtrStepi map, const float low_thresh, const float high_thresh) { const int CANNY_SHIFT = 15; const int TG22 = (int)(0.4142135623730950488016887242097*(1<<CANNY_SHIFT) + 0.5); const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x == 0 || x >= dx.cols - 1 || y == 0 || y >= dx.rows - 1) return; int dxVal = dx(y, x); int dyVal = dy(y, x); const int s = (dxVal ^ dyVal) < 0 ? 
-1 : 1; const float m = tex2D(tex_mag, x, y); dxVal = ::abs(dxVal); dyVal = ::abs(dyVal); // 0 - the pixel can not belong to an edge // 1 - the pixel might belong to an edge // 2 - the pixel does belong to an edge int edge_type = 0; if (m > low_thresh) { const int tg22x = dxVal * TG22; const int tg67x = tg22x + ((dxVal + dxVal) << CANNY_SHIFT); dyVal <<= CANNY_SHIFT; if (dyVal < tg22x) { if (m > tex2D(tex_mag, x - 1, y) && m >= tex2D(tex_mag, x + 1, y)) edge_type = 1 + (int)(m > high_thresh); } else if(dyVal > tg67x) { if (m > tex2D(tex_mag, x, y - 1) && m >= tex2D(tex_mag, x, y + 1)) edge_type = 1 + (int)(m > high_thresh); } else { if (m > tex2D(tex_mag, x - s, y - 1) && m >= tex2D(tex_mag, x + s, y + 1)) edge_type = 1 + (int)(m > high_thresh); } } map(y, x) = edge_type; } __global__ void calcMapKernel(const PtrStepSzi dx, const PtrStepi dy, PtrStepi map, const float low_thresh, const float high_thresh, cudaTextureObject_t tex_mag) { const int CANNY_SHIFT = 15; const int TG22 = (int)(0.4142135623730950488016887242097*(1<<CANNY_SHIFT) + 0.5); const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x == 0 || x >= dx.cols - 1 || y == 0 || y >= dx.rows - 1) return; int dxVal = dx(y, x); int dyVal = dy(y, x); const int s = (dxVal ^ dyVal) < 0 ? 
-1 : 1; const float m = tex2D<float>(tex_mag, x, y); dxVal = ::abs(dxVal); dyVal = ::abs(dyVal); // 0 - the pixel can not belong to an edge // 1 - the pixel might belong to an edge // 2 - the pixel does belong to an edge int edge_type = 0; if (m > low_thresh) { const int tg22x = dxVal * TG22; const int tg67x = tg22x + ((dxVal + dxVal) << CANNY_SHIFT); dyVal <<= CANNY_SHIFT; if (dyVal < tg22x) { if (m > tex2D<float>(tex_mag, x - 1, y) && m >= tex2D<float>(tex_mag, x + 1, y)) edge_type = 1 + (int)(m > high_thresh); } else if(dyVal > tg67x) { if (m > tex2D<float>(tex_mag, x, y - 1) && m >= tex2D<float>(tex_mag, x, y + 1)) edge_type = 1 + (int)(m > high_thresh); } else { if (m > tex2D<float>(tex_mag, x - s, y - 1) && m >= tex2D<float>(tex_mag, x + s, y + 1)) edge_type = 1 + (int)(m > high_thresh); } } map(y, x) = edge_type; } void calcMap(PtrStepSzi dx, PtrStepSzi dy, PtrStepSzf mag, PtrStepSzi map, float low_thresh, float high_thresh, cudaStream_t stream) { const dim3 block(16, 16); const dim3 grid(divUp(dx.cols, block.x), divUp(dx.rows, block.y)); if (deviceSupports(FEATURE_SET_COMPUTE_30)) { // Use the texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypePitch2D; resDesc.res.pitch2D.devPtr = mag.ptr(); resDesc.res.pitch2D.height = mag.rows; resDesc.res.pitch2D.width = mag.cols; resDesc.res.pitch2D.pitchInBytes = mag.step; resDesc.res.pitch2D.desc = cudaCreateChannelDesc<float>(); cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.addressMode[2] = cudaAddressModeClamp; cudaTextureObject_t tex=0; cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); calcMapKernel<<<grid, block, 0, stream>>>(dx, dy, map, low_thresh, high_thresh, tex); cudaSafeCall( cudaGetLastError() ); if (stream == NULL) cudaSafeCall( cudaDeviceSynchronize() ); else cudaSafeCall( cudaStreamSynchronize(stream) ); cudaSafeCall( 
cudaDestroyTextureObject(tex) ); } else { // Use the texture reference bindTexture(&tex_mag, mag); calcMapKernel<<<grid, block, 0, stream>>>(dx, dy, map, low_thresh, high_thresh); cudaSafeCall( cudaGetLastError() ); if (stream == NULL) cudaSafeCall( cudaDeviceSynchronize() ); } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { __device__ __forceinline__ bool checkIdx(int y, int x, int rows, int cols) { return (y >= 0) && (y < rows) && (x >= 0) && (x < cols); } __global__ void edgesHysteresisLocalKernel(PtrStepSzi map, short2* st, int* d_counter) { __shared__ volatile int smem[18][18]; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; smem[threadIdx.y + 1][threadIdx.x + 1] = checkIdx(y, x, map.rows, map.cols) ? map(y, x) : 0; if (threadIdx.y == 0) smem[0][threadIdx.x + 1] = checkIdx(y - 1, x, map.rows, map.cols) ? map(y - 1, x) : 0; if (threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][threadIdx.x + 1] = checkIdx(y + 1, x, map.rows, map.cols) ? map(y + 1, x) : 0; if (threadIdx.x == 0) smem[threadIdx.y + 1][0] = checkIdx(y, x - 1, map.rows, map.cols) ? map(y, x - 1) : 0; if (threadIdx.x == blockDim.x - 1) smem[threadIdx.y + 1][blockDim.x + 1] = checkIdx(y, x + 1, map.rows, map.cols) ? map(y, x + 1) : 0; if (threadIdx.x == 0 && threadIdx.y == 0) smem[0][0] = checkIdx(y - 1, x - 1, map.rows, map.cols) ? map(y - 1, x - 1) : 0; if (threadIdx.x == blockDim.x - 1 && threadIdx.y == 0) smem[0][blockDim.x + 1] = checkIdx(y - 1, x + 1, map.rows, map.cols) ? map(y - 1, x + 1) : 0; if (threadIdx.x == 0 && threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][0] = checkIdx(y + 1, x - 1, map.rows, map.cols) ? map(y + 1, x - 1) : 0; if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1) smem[blockDim.y + 1][blockDim.x + 1] = checkIdx(y + 1, x + 1, map.rows, map.cols) ? 
map(y + 1, x + 1) : 0; __syncthreads(); if (x >= map.cols || y >= map.rows) return; int n; #pragma unroll for (int k = 0; k < 16; ++k) { n = 0; if (smem[threadIdx.y + 1][threadIdx.x + 1] == 1) { n += smem[threadIdx.y ][threadIdx.x ] == 2; n += smem[threadIdx.y ][threadIdx.x + 1] == 2; n += smem[threadIdx.y ][threadIdx.x + 2] == 2; n += smem[threadIdx.y + 1][threadIdx.x ] == 2; n += smem[threadIdx.y + 1][threadIdx.x + 2] == 2; n += smem[threadIdx.y + 2][threadIdx.x ] == 2; n += smem[threadIdx.y + 2][threadIdx.x + 1] == 2; n += smem[threadIdx.y + 2][threadIdx.x + 2] == 2; } __syncthreads(); if (n > 0) smem[threadIdx.y + 1][threadIdx.x + 1] = 2; __syncthreads(); } const int e = smem[threadIdx.y + 1][threadIdx.x + 1]; map(y, x) = e; n = 0; if (e == 2) { n += smem[threadIdx.y ][threadIdx.x ] == 1; n += smem[threadIdx.y ][threadIdx.x + 1] == 1; n += smem[threadIdx.y ][threadIdx.x + 2] == 1; n += smem[threadIdx.y + 1][threadIdx.x ] == 1; n += smem[threadIdx.y + 1][threadIdx.x + 2] == 1; n += smem[threadIdx.y + 2][threadIdx.x ] == 1; n += smem[threadIdx.y + 2][threadIdx.x + 1] == 1; n += smem[threadIdx.y + 2][threadIdx.x + 2] == 1; } if (n > 0) { const int ind = ::atomicAdd(d_counter, 1); st[ind] = make_short2(x, y); } } void edgesHysteresisLocal(PtrStepSzi map, short2* st1, int* d_counter, cudaStream_t stream) { cudaSafeCall( cudaMemsetAsync(d_counter, 0, sizeof(int), stream) ); const dim3 block(16, 16); const dim3 grid(divUp(map.cols, block.x), divUp(map.rows, block.y)); edgesHysteresisLocalKernel<<<grid, block, 0, stream>>>(map, st1, d_counter); cudaSafeCall( cudaGetLastError() ); if (stream == NULL) cudaSafeCall( cudaDeviceSynchronize() ); } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { __constant__ int c_dx[8] = {-1, 0, 1, -1, 1, -1, 0, 1}; __constant__ int c_dy[8] = {-1, -1, -1, 0, 0, 1, 1, 1}; __global__ void edgesHysteresisGlobalKernel(PtrStepSzi map, short2* st1, short2* st2, int* d_counter, const 
int count) { const int stack_size = 512; __shared__ int s_counter; __shared__ int s_ind; __shared__ short2 s_st[stack_size]; if (threadIdx.x == 0) s_counter = 0; __syncthreads(); int ind = blockIdx.y * gridDim.x + blockIdx.x; if (ind >= count) return; short2 pos = st1[ind]; if (threadIdx.x < 8) { pos.x += c_dx[threadIdx.x]; pos.y += c_dy[threadIdx.x]; if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1) { map(pos.y, pos.x) = 2; ind = Emulation::smem::atomicAdd(&s_counter, 1); s_st[ind] = pos; } } __syncthreads(); while (s_counter > 0 && s_counter <= stack_size - blockDim.x) { const int subTaskIdx = threadIdx.x >> 3; const int portion = ::min(s_counter, blockDim.x >> 3); if (subTaskIdx < portion) pos = s_st[s_counter - 1 - subTaskIdx]; __syncthreads(); if (threadIdx.x == 0) s_counter -= portion; __syncthreads(); if (subTaskIdx < portion) { pos.x += c_dx[threadIdx.x & 7]; pos.y += c_dy[threadIdx.x & 7]; if (pos.x > 0 && pos.x < map.cols - 1 && pos.y > 0 && pos.y < map.rows - 1 && map(pos.y, pos.x) == 1) { map(pos.y, pos.x) = 2; ind = Emulation::smem::atomicAdd(&s_counter, 1); s_st[ind] = pos; } } __syncthreads(); } if (s_counter > 0) { if (threadIdx.x == 0) { s_ind = ::atomicAdd(d_counter, s_counter); if (s_ind + s_counter > map.cols * map.rows) s_counter = 0; } __syncthreads(); ind = s_ind; for (int i = threadIdx.x; i < s_counter; i += blockDim.x) st2[ind + i] = s_st[i]; } } void edgesHysteresisGlobal(PtrStepSzi map, short2* st1, short2* st2, int* d_counter, cudaStream_t stream) { int count; cudaSafeCall( cudaMemcpyAsync(&count, d_counter, sizeof(int), cudaMemcpyDeviceToHost, stream) ); cudaSafeCall( cudaStreamSynchronize(stream) ); while (count > 0) { cudaSafeCall( cudaMemsetAsync(d_counter, 0, sizeof(int), stream) ); const dim3 block(128); const dim3 grid(::min(count, 65535u), divUp(count, 65535), 1); edgesHysteresisGlobalKernel<<<grid, block, 0, stream>>>(map, st1, st2, d_counter, count); cudaSafeCall( 
cudaGetLastError() ); if (stream == NULL) cudaSafeCall( cudaDeviceSynchronize() ); cudaSafeCall( cudaMemcpyAsync(&count, d_counter, sizeof(int), cudaMemcpyDeviceToHost, stream) ); cudaSafeCall( cudaStreamSynchronize(stream) ); count = min(count, map.cols * map.rows); //std::swap(st1, st2); short2* tmp = st1; st1 = st2; st2 = tmp; } } } ////////////////////////////////////////////////////////////////////////////////////////// namespace canny { struct GetEdges : unary_function<int, uchar> { __device__ __forceinline__ uchar operator ()(int e) const { return (uchar)(-(e >> 1)); } __host__ __device__ __forceinline__ GetEdges() {} __host__ __device__ __forceinline__ GetEdges(const GetEdges&) {} }; } namespace cv { namespace cuda { namespace device { template <> struct TransformFunctorTraits<canny::GetEdges> : DefaultTransformFunctorTraits<canny::GetEdges> { enum { smart_shift = 4 }; }; }}} namespace canny { void getEdges(PtrStepSzi map, PtrStepSzb dst, cudaStream_t stream) { transform(map, dst, GetEdges(), WithOutMask(), stream); } } #endif /* CUDA_DISABLER */
a0545d7b9caf607b6c11b0b7d52f884c0a94ff07.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020 Michael Koesel and respective contributors // SPDX-License-Identifier: MIT // See accompanying LICENSE file for detailed information #include "dogm/common.h" #include "dogm/cuda_utils.h" #include "dogm/dogm_types.h" #include "dogm/kernel/update_persistent_particles.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> namespace dogm { __device__ float calc_norm_assoc(float occ_accum, float rho_p) { return occ_accum > 0.0f ? rho_p / occ_accum : 0.0f; } __device__ float calc_norm_unassoc(const GridCell& grid_cell) { float occ_mass = grid_cell.occ_mass; return occ_mass > 0.0 ? grid_cell.pers_occ_mass / occ_mass : 0.0; } __device__ void set_normalization_components(GridCell* __restrict__ grid_cell_array, int i, float mu_A, float mu_UA) { grid_cell_array[i].mu_A = mu_A; grid_cell_array[i].mu_UA = mu_UA; } __device__ float update_unnorm(const ParticlesSoA& particle_array, int i, const MeasurementCell* __restrict__ meas_cell_array) { return meas_cell_array[particle_array.grid_cell_idx[i]].likelihood * particle_array.weight[i]; } __device__ float normalize(const ParticlesSoA& particle, int i, const GridCell* __restrict__ grid_cell_array, const MeasurementCell* __restrict__ meas_cell_array, float weight) { const int cell_idx = particle.grid_cell_idx[i]; const GridCell& cell = grid_cell_array[cell_idx]; const MeasurementCell& meas_cell = meas_cell_array[cell_idx]; return meas_cell.p_A * cell.mu_A * weight + (1.0f - meas_cell.p_A) * cell.mu_UA * particle.weight[i]; } __global__ void updatePersistentParticlesKernel1(const ParticlesSoA particle_array, const MeasurementCell* __restrict__ meas_cell_array, float* __restrict__ weight_array, int particle_count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x) { weight_array[i] = update_unnorm(particle_array, i, meas_cell_array); } } __global__ void 
updatePersistentParticlesKernel2(GridCell* __restrict__ grid_cell_array, const float* __restrict__ weight_array_accum, int cell_count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x) { int start_idx = grid_cell_array[i].start_idx; int end_idx = grid_cell_array[i].end_idx; if (start_idx != -1) { float m_occ_accum = subtract(weight_array_accum, start_idx, end_idx); float rho_p = grid_cell_array[i].pers_occ_mass; float mu_A = calc_norm_assoc(m_occ_accum, rho_p); float mu_UA = calc_norm_unassoc(grid_cell_array[i]); set_normalization_components(grid_cell_array, i, mu_A, mu_UA); // printf("mu_A: %f, mu_UA: %f\n", mu_A, mu_UA); } } } __global__ void updatePersistentParticlesKernel3(const ParticlesSoA particle_array, const MeasurementCell* __restrict__ meas_cell_array, const GridCell* __restrict__ grid_cell_array, float* __restrict__ weight_array, int particle_count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x) { weight_array[i] = normalize(particle_array, i, grid_cell_array, meas_cell_array, weight_array[i]); } } } /* namespace dogm */
a0545d7b9caf607b6c11b0b7d52f884c0a94ff07.cu
// Copyright (c) 2020 Michael Koesel and respective contributors // SPDX-License-Identifier: MIT // See accompanying LICENSE file for detailed information #include "dogm/common.h" #include "dogm/cuda_utils.h" #include "dogm/dogm_types.h" #include "dogm/kernel/update_persistent_particles.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> namespace dogm { __device__ float calc_norm_assoc(float occ_accum, float rho_p) { return occ_accum > 0.0f ? rho_p / occ_accum : 0.0f; } __device__ float calc_norm_unassoc(const GridCell& grid_cell) { float occ_mass = grid_cell.occ_mass; return occ_mass > 0.0 ? grid_cell.pers_occ_mass / occ_mass : 0.0; } __device__ void set_normalization_components(GridCell* __restrict__ grid_cell_array, int i, float mu_A, float mu_UA) { grid_cell_array[i].mu_A = mu_A; grid_cell_array[i].mu_UA = mu_UA; } __device__ float update_unnorm(const ParticlesSoA& particle_array, int i, const MeasurementCell* __restrict__ meas_cell_array) { return meas_cell_array[particle_array.grid_cell_idx[i]].likelihood * particle_array.weight[i]; } __device__ float normalize(const ParticlesSoA& particle, int i, const GridCell* __restrict__ grid_cell_array, const MeasurementCell* __restrict__ meas_cell_array, float weight) { const int cell_idx = particle.grid_cell_idx[i]; const GridCell& cell = grid_cell_array[cell_idx]; const MeasurementCell& meas_cell = meas_cell_array[cell_idx]; return meas_cell.p_A * cell.mu_A * weight + (1.0f - meas_cell.p_A) * cell.mu_UA * particle.weight[i]; } __global__ void updatePersistentParticlesKernel1(const ParticlesSoA particle_array, const MeasurementCell* __restrict__ meas_cell_array, float* __restrict__ weight_array, int particle_count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x) { weight_array[i] = update_unnorm(particle_array, i, meas_cell_array); } } __global__ void updatePersistentParticlesKernel2(GridCell* __restrict__ grid_cell_array, const float* __restrict__ 
weight_array_accum, int cell_count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x) { int start_idx = grid_cell_array[i].start_idx; int end_idx = grid_cell_array[i].end_idx; if (start_idx != -1) { float m_occ_accum = subtract(weight_array_accum, start_idx, end_idx); float rho_p = grid_cell_array[i].pers_occ_mass; float mu_A = calc_norm_assoc(m_occ_accum, rho_p); float mu_UA = calc_norm_unassoc(grid_cell_array[i]); set_normalization_components(grid_cell_array, i, mu_A, mu_UA); // printf("mu_A: %f, mu_UA: %f\n", mu_A, mu_UA); } } } __global__ void updatePersistentParticlesKernel3(const ParticlesSoA particle_array, const MeasurementCell* __restrict__ meas_cell_array, const GridCell* __restrict__ grid_cell_array, float* __restrict__ weight_array, int particle_count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x) { weight_array[i] = normalize(particle_array, i, grid_cell_array, meas_cell_array, weight_array[i]); } } } /* namespace dogm */
6f0ac612deb7db40078ac41a5c09d0c43375c768.hip
// !!! This is a file automatically generated by hipify!!! /* This version use neighbor_e and neighbor_o to stroe the negibhor indices of even sites and odd site.*/ #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <string.h> #include <hip/hip_runtime.h> #include <rocblas.h> void FPRINTF(FILE*, int N, double*); double EVALUATE_ERROR(int, int, double, double*); __global__ void INITIALIZE(int N, double dx, int* L_e, int* L_o, int* R_e, int* R_o, int* D_e, int* D_o, int* U_e, int* U_o, double* rho_even, double* rho_odd, double* rho, double* field_even, double* field_odd, double* field_analytic) { extern __shared__ double sm[]; int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N/2; int idx_sm = threadIdx.x + blockDim.x*threadIdx.y; int idx_site_e, idx_site_o; sm[idx_sm] = 0.0; int site_x_e = (2*idx)%N; int site_y_e = (2*idx)/N; int parity_e = (site_x_e+site_y_e)%2; site_x_e += parity_e; double x_e = site_x_e*dx; double y_e = site_y_e*dx; idx_site_e = site_x_e + N*site_y_e; int site_x_o = (2*idx)%N; int site_y_o = (2*idx)/N; int parity_o = (site_x_o+site_y_o+1)%2; site_x_o += parity_o; double x_o = site_x_o*dx; double y_o = site_y_o*dx; idx_site_o = site_x_o + N*site_y_o; field_analytic[idx_site_e] = x_e*(1.-x_e)*y_e*(1.-y_e)*exp(x_e-y_e); if ( site_x_e==0 || site_x_e==N-1 || site_y_e==0 || site_y_e==N-1 ) { field_even[idx] = field_analytic[idx_site_e]; rho[idx_site_e] = 0.0; rho_even[idx] = 0.0; } else { field_even[idx] = 0.0; rho[idx_site_e] = 2.*x_e*(y_e-1)*(y_e-2.*x_e+x_e*y_e+2)*exp(x_e-y_e); rho_even[idx] = rho[idx_site_e]; // field_even[idx] = field_analytic[idx_site_o]; } field_analytic[idx_site_o] = x_o*(1.-x_o)*y_o*(1.-y_o)*exp(x_o-y_o); if ( site_x_o==0 || site_x_o==N-1 || site_y_o==0 || site_y_o==N-1 ) { field_odd[idx] = field_analytic[idx_site_o]; rho[idx_site_o] = 0.0; rho_odd[idx] = 0.0; } else { field_odd[idx] = 0.0; rho[idx_site_o]= 
2.*x_o*(y_o-1)*(y_o-2.*x_o+x_o*y_o+2)*exp(x_o-y_o); rho_odd[idx] = rho[idx_site_o]; // field_odd[idx] = field_analytic[idx_site_o]; } // construct neighbors for even sites int site_x = idx%(N/2); int site_y = idx/(N/2); if ( (idx>N/2-1)&&(idx<(N*N)/2-N/2)) { if (site_y%2==0) { if (site_x!=0) { int L = site_x-1 + site_y*(N/2); int R = idx; int D = site_x + (site_y-1)*(N/2); int U = site_x + (site_y+1)*(N/2); L_e[idx] = L; R_e[idx] = R; D_e[idx] = D; U_e[idx] = U; // printf("%d\t%d\t%d\t%d\t%d\n", idx, L, R, U, D); } if (site_x!=(N/2)-1) { int L = idx; int R = site_x +1 + site_y*(N/2); int D = site_x + (site_y-1)*(N/2); int U = site_x + (site_y+1)*(N/2); L_o[idx] = L; R_o[idx] = R; D_o[idx] = D; U_o[idx] = U; // printf("%d\t%d\t%d\t%d\t%d\n", idx, L, R, U, D); } } else { if (site_x!=(N/2)-1) { int L = idx; int R = site_x+1 + site_y*(N/2); int D = site_x + (site_y-1)*(N/2); int U = site_x + (site_y_e+1)*(N/2); L_e[idx] = L; R_e[idx] = R; D_e[idx] = D; U_e[idx] = U; // printf("%d\t%d\t%d\t%d\t%d\n", idx, L, R, U, D); } if (site_x!=0) { int L = site_x-1 + site_y*(N/2); int R = idx; int D = site_x + (site_y-1)*(N/2); int U = site_x + (site_y+1)*(N/2); L_o[idx] = L; R_o[idx] = R; D_o[idx] = D; U_o[idx] = U; // printf("%d\t%d\t%d\t%d\t%d\n", idx, L, R, U, D); } } } else { L_e[idx] = 0; R_e[idx] = 0; U_e[idx] = 0; D_e[idx] = 0; L_o[idx] = 0; R_o[idx] = 0; U_o[idx] = 0; D_o[idx] = 0; } } __global__ void SOR_SOLVER_EVEN(int N, double dx, double omega, int* L_e, int* R_e, int* D_e, int* U_e, double* field_even, double* field_odd, double* rho_even) { int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N/2; int site_x = idx%(N/2); int site_y = idx/(N/2); if ( (idx>N/2-1)&&(idx<(N*N)/2-N/2)) { if (site_y%2==0) { if (site_x!=0) { field_even[idx] += 0.25*omega*( field_odd[L_e[idx]] + field_odd[R_e[idx]] + field_odd[U_e[idx]] + field_odd[D_e[idx]] - dx*dx*rho_even[idx] - 4.*field_even[idx]); } } else { if 
(site_x!=(N/2)-1) { field_even[idx] += 0.25*omega*( field_odd[L_e[idx]] + field_odd[R_e[idx]] + field_odd[U_e[idx]] + field_odd[D_e[idx]] - dx*dx*rho_even[idx] - 4.*field_even[idx]); } } } __syncthreads(); } __global__ void SOR_SOLVER_ODD(int N, double dx, double omega, int* L_o, int* R_o, int* D_o, int* U_o, double* field_even, double* field_odd, double* rho_odd) { int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N/2; int site_x = idx%(N/2); int site_y = idx/(N/2); if ( (idx>N/2-1)&&(idx<(N*N)/2-N/2)) { if (site_y%2==0) { if (site_x!=(N/2)-1) { field_odd[idx] += 0.25*omega*( field_even[L_o[idx]] + field_even[R_o[idx]] + field_even[U_o[idx]] + field_even[D_o[idx]] - dx*dx*rho_odd[idx] - 4.*field_odd[idx]); } } else { if (site_x!=0) { field_odd[idx] += 0.25*omega*( field_even[L_o[idx]] + field_even[R_o[idx]] + field_even[U_o[idx]] + field_even[D_o[idx]] - dx*dx*rho_odd[idx] - 4.*field_odd[idx]); } } } __syncthreads(); } __global__ void ERROR(int N, double dx, int* L_e, int* L_o, int* R_e, int* R_o, int* D_e, int* D_o, int* U_e, int* U_o, double* rho_even, double* rho_odd, double* field_even, double* field_odd, double *error_block) { extern __shared__ double sm[]; int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N/2; int idx_sm = threadIdx.x + blockDim.x*threadIdx.y; sm[idx_sm] = 0.0; int site_x = idx%(N/2); int site_y = idx/(N/2); if ( (idx>N/2-1)&&(idx<(N*N)/2-N/2)) { if (site_y%2==0) { if (site_x!=0) sm[idx_sm] += pow((field_odd[L_e[idx]]+field_odd[R_e[idx]]+field_odd[D_e[idx]]+field_odd[U_e[idx]]-4.0*field_even[idx])/dx/dx-rho_even[idx], 2.0); if (site_x!=(N/2)-1) sm[idx_sm] += pow((field_even[L_o[idx]]+field_even[R_o[idx]]+field_even[D_o[idx]]+field_even[U_o[idx]]-4.0*field_odd[idx])/dx/dx-rho_odd[idx], 2.0); } else { if (site_x!=(N/2)-1) sm[idx_sm] += 
pow((field_odd[L_e[idx]]+field_odd[R_e[idx]]+field_odd[D_e[idx]]+field_odd[U_e[idx]]-4.0*field_even[idx])/dx/dx-rho_even[idx], 2.0); if (site_x!=0) sm[idx_sm] += pow((field_even[L_o[idx]]+field_even[R_o[idx]]+field_even[D_o[idx]]+field_even[U_o[idx]]-4.0*field_odd[idx])/dx/dx-rho_odd[idx], 2.0); } } __syncthreads(); for (int shift=blockDim.x*blockDim.y/2; shift>0; shift/=2) { if (idx_sm<shift) sm[idx_sm] += sm[idx_sm+shift]; __syncthreads(); } if (idx_sm==0) error_block[blockIdx.x+blockIdx.y*gridDim.x] = sm[0]; } int main(void) { int N, N_threads, N_block, display_interval, tpb_x, tpb_y, bpg_x, bpg_y; // int N, N_threads, display_interval, tpb, bpg; float preparation_time, computation_time, total_time; double omega, dx, criteria; long iter, iter_max; double *field_even, *field_odd, *rho_even, *rho_odd, *field_final, *field_analytic, *rho, *error_block; int **neighbor_even, **neighbor_odd; size_t size_lattice, size_sm; hipEvent_t start, stop; FILE* output_field, *output_rho; printf("Solve the Poission problem using SOR by OpenMP.\n\n"); printf("Enter the latttice size (N,N) (N must be divisible by 2)."); scanf("%d", &N); printf("The lattice size is (%d,%d).\n", N, N); printf("Set the value of omega.\n"); scanf("%lf",&omega); printf("The value of omega is %.4f .\n", omega); printf("Set the maximum iteration times.\n"); scanf("%ld", &iter_max); printf("The maximum iteration times is %ld .\n", iter_max); printf("Set the stopping criteria.\n"); scanf("%lf", &criteria); printf("The stopping criteria is %.4e .\n", criteria); printf("Set the display interval during iterations.\n"); scanf("%d", &display_interval); printf("The display interval is set to be %d .\n", display_interval); // printf("Set the number of OpenMP threads.\n"); // scanf("%d", &N_threads); // printf("The number of OpenMP threads is %d .\n", N_threads); printf("Set the GPU threads per block (tx,ty). 
(N/2 must be divisible by tx and N must be divisible by N)\n"); scanf("%d %d", &tpb_x, &tpb_y); if ((N/2)%tpb_x!=0) { printf("N/2 is not divisible by tx! Exit!\n"); return EXIT_FAILURE; } else if (N%tpb_y!=0) { printf("N is not divisible by ty! Exit!\n"); return EXIT_FAILURE; } else { printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y); printf("The block per grid will be set automatically."); bpg_x = (N/2)/tpb_x; bpg_y = N/tpb_y; printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y); } printf("Set the number of OpenMP threads.\n"); scanf("%d", &N_threads); printf("The number of OpenMP threads is %d.\n", N_threads); printf("\n"); printf("Start Preparation...\n"); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); dx = 1./(N-1); N_block = (N/2)/tpb_x*(N/tpb_y); size_lattice = N*N*sizeof(double); size_sm = tpb_x*tpb_y*sizeof(double); field_final = (double*)malloc(N*N*sizeof(double)); neighbor_even = (int**)malloc(4*sizeof(int*)); neighbor_odd = (int**)malloc(4*sizeof(int*)); output_field = fopen("analytical_field_distribution_allocate.txt","w"); output_rho = fopen("charge_distribution_allocate.txt","w"); hipSetDevice(0); dim3 tpb(tpb_x,tpb_y); dim3 bpg(bpg_x,bpg_y); cublasMath_t mode = CUBLAS_TENSOR_OP_MATH; hipblasPointerMode_t mode_pt = HIPBLAS_POINTER_MODE_HOST; hipblasHandle_t handle; hipblasCreate(&handle); cublasSetMathMode(handle, mode); hipblasSetPointerMode(handle, mode_pt); hipMallocManaged(&field_even, size_lattice/2); hipMallocManaged(&field_odd, size_lattice/2); hipMallocManaged(&field_analytic, size_lattice); hipMallocManaged(&rho_even, size_lattice/2); hipMallocManaged(&rho_odd, size_lattice/2); hipMallocManaged(&rho, size_lattice); hipMallocManaged(&error_block, N_block*sizeof(double)); // construct neighbor index for (int i=0; i<4; i++) { hipMallocManaged(&neighbor_even[i], (N*N)/2*sizeof(int)); hipMallocManaged(&neighbor_odd[i], (N*N)/2*sizeof(int)); } // hipLaunchKernelGGL(( INITIALIZE), 
dim3(bpg),dim3(tpb),size_sm, 0, N, dx, neighbor_even[0], neighbor_odd[0], neighbor_even[1], neighbor_odd[1], neighbor_even[2],neighbor_odd[2], neighbor_even[3], neighbor_odd[3], rho_even, rho_odd, rho, field_even, field_odd, field_analytic); hipLaunchKernelGGL(( ERROR), dim3(bpg),dim3(tpb),size_sm, 0, N, dx, neighbor_even[0], neighbor_odd[0], neighbor_even[1], neighbor_odd[1], neighbor_even[2], neighbor_odd[2], neighbor_even[3], neighbor_odd[3], rho_even, rho_odd, field_even, field_odd, error_block); hipDeviceSynchronize(); double norm; hipblasDdot(handle, N*N, rho, 1, rho, 1, &norm); norm = sqrt(norm); printf("Norm = %.4e\n", norm); // debug // for (int j=0; j<N; j++) // { // for (int i=0; i<N/2; i++) // printf("%d\t",neighbor_odd[3][i+N/2*j]); // printf("\n"); // } // FPRINTF(output_field, N, field_analytic); FPRINTF(output_rho, N, rho); printf("Preparation ends.\n"); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&preparation_time, start, stop); printf("Total preparation time is %.4f ms.\n\n", preparation_time); hipEventRecord(start,0); double error = EVALUATE_ERROR(N, N_block, norm, error_block); printf("Starts computation with error = %.8e...\n", error); iter = 0; while (error>criteria&&iter<iter_max) { hipLaunchKernelGGL(( SOR_SOLVER_EVEN), dim3(bpg),dim3(tpb), 0, 0, N, dx, omega, neighbor_even[0], neighbor_even[1], neighbor_even[2], neighbor_even[3], field_even, field_odd, rho_even); // hipDeviceSynchronize(); hipLaunchKernelGGL(( SOR_SOLVER_ODD), dim3(bpg),dim3(tpb), 0, 0, N, dx, omega, neighbor_odd[0], neighbor_odd[1], neighbor_odd[2], neighbor_odd[3], field_even, field_odd, rho_odd); // hipDeviceSynchronize(); hipLaunchKernelGGL(( ERROR), dim3(bpg),dim3(tpb),size_sm, 0, N, dx, neighbor_even[0], neighbor_odd[0], neighbor_even[1], neighbor_odd[1], neighbor_even[2], neighbor_odd[2], neighbor_even[3], neighbor_odd[3], rho_even, rho_odd, field_even, field_odd, error_block); hipDeviceSynchronize(); error = EVALUATE_ERROR(N, N_block, 
norm, error_block); iter += 1; if (iter%display_interval==0) printf("Iteration = %ld , error = %.8e .\n", iter, error); } omp_set_num_threads(N_threads); # pragma omp parallel for for (int i_E=0; i_E<(N*N)/2; i_E++) { int ix = (2*i_E)%N; int iy = (2*i_E)/N; int parity = (ix+iy)%2; ix += parity; field_final[ix+iy*N] = field_even[i_E]; } # pragma omp parallel for for (int i_O=0; i_O<(N*N)/2; i_O++) { int ix = (2*i_O)%N; int iy = (2*i_O)/N; int parity = (ix+iy+1)%2; ix += parity; field_final[ix+iy*N] = field_odd[i_O]; } output_field = fopen("simulated_field_distribution_GPU_allocate.txt","w"); FPRINTF(output_field, N, field_final); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&computation_time, start, stop); printf("Computation time is %.4f ms.\n", computation_time); total_time = preparation_time + computation_time; printf("Total iteration is %ld ; total time is %.4f ms.\n", iter, total_time); free(field_final); free(neighbor_even); free(neighbor_odd); hipFree(field_even); hipFree(field_odd); hipFree(field_analytic); hipFree(rho_even); hipFree(rho_odd); hipFree(rho); hipFree(error_block); fclose(output_field); fclose(output_rho); return EXIT_SUCCESS; } double EVALUATE_ERROR(int N, int N_block, double norm, double* error_block) { double error = 0.0; for (int i=0; i<N_block; i++) error += error_block[i]; return sqrt(error)/norm; } //void LAPLACIAN_SOR(int N, double dx, double omega, double* field_even, double* field_odd, double *rho_even, double *rho_odd, int **neighbor_even, int **neighbor_odd) //{ //# pragma omp parallel for // for (int i_E=N/2; i_E<(N*N)/2-N/2; i_E++) // { // int i_x = i_E%(N/2); // int i_y = i_E/(N/2); // if (i_y%2==0) // { // if (i_x!=0) // field_even[i_E] += 0.25*omega*( field_odd[neighbor_even[0][i_E]] + field_odd[neighbor_even[1][i_E]] + field_odd[neighbor_even[2][i_E]] + field_odd[neighbor_even[3][i_E]] - dx*dx*rho_even[i_E] - 4.*field_even[i_E]); // } // else // { // if (i_x!=(N/2)-1) // field_even[i_E] += 
0.25*omega*( field_odd[neighbor_even[0][i_E]] + field_odd[neighbor_even[1][i_E]] + field_odd[neighbor_even[2][i_E]] + field_odd[neighbor_even[3][i_E]] - dx*dx*rho_even[i_E] - 4.*field_even[i_E]); // } // } //# pragma omp parallel for // for (int i_O=N/2; i_O<(N*N)/2-N/2; i_O++) // { // int i_x = i_O%(N/2); // int i_y = i_O/(N/2); // if (i_y%2==0) // { // if (i_x!=(N/2)-1) // field_odd[i_O] += 0.25*omega*( field_even[neighbor_odd[0][i_O]] + field_even[neighbor_odd[1][i_O]] + field_even[neighbor_odd[2][i_O]] + field_even[neighbor_odd[3][i_O]] - dx*dx*rho_odd[i_O] - 4.*field_odd[i_O]); // } // else // { // if (i_x!=0) // field_odd[i_O] += 0.25*omega*( field_even[neighbor_odd[0][i_O]] + field_even[neighbor_odd[1][i_O]] + field_even[neighbor_odd[2][i_O]] + field_even[neighbor_odd[3][i_O]] - dx*dx*rho_odd[i_O] - 4.*field_odd[i_O]); // } // } //} void FPRINTF(FILE *output_file, int N, double *array) { for (int j=0; j<N; j++) { for (int i=0; i<N; i++) fprintf(output_file, "%.8e\t", array[i+j*N]); fprintf(output_file, "\n"); } }
6f0ac612deb7db40078ac41a5c09d0c43375c768.cu
/* This version use neighbor_e and neighbor_o to stroe the negibhor indices of even sites and odd site.*/ #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <string.h> #include <cuda_runtime.h> #include <cublas_v2.h> void FPRINTF(FILE*, int N, double*); double EVALUATE_ERROR(int, int, double, double*); __global__ void INITIALIZE(int N, double dx, int* L_e, int* L_o, int* R_e, int* R_o, int* D_e, int* D_o, int* U_e, int* U_o, double* rho_even, double* rho_odd, double* rho, double* field_even, double* field_odd, double* field_analytic) { extern __shared__ double sm[]; int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N/2; int idx_sm = threadIdx.x + blockDim.x*threadIdx.y; int idx_site_e, idx_site_o; sm[idx_sm] = 0.0; int site_x_e = (2*idx)%N; int site_y_e = (2*idx)/N; int parity_e = (site_x_e+site_y_e)%2; site_x_e += parity_e; double x_e = site_x_e*dx; double y_e = site_y_e*dx; idx_site_e = site_x_e + N*site_y_e; int site_x_o = (2*idx)%N; int site_y_o = (2*idx)/N; int parity_o = (site_x_o+site_y_o+1)%2; site_x_o += parity_o; double x_o = site_x_o*dx; double y_o = site_y_o*dx; idx_site_o = site_x_o + N*site_y_o; field_analytic[idx_site_e] = x_e*(1.-x_e)*y_e*(1.-y_e)*exp(x_e-y_e); if ( site_x_e==0 || site_x_e==N-1 || site_y_e==0 || site_y_e==N-1 ) { field_even[idx] = field_analytic[idx_site_e]; rho[idx_site_e] = 0.0; rho_even[idx] = 0.0; } else { field_even[idx] = 0.0; rho[idx_site_e] = 2.*x_e*(y_e-1)*(y_e-2.*x_e+x_e*y_e+2)*exp(x_e-y_e); rho_even[idx] = rho[idx_site_e]; // field_even[idx] = field_analytic[idx_site_o]; } field_analytic[idx_site_o] = x_o*(1.-x_o)*y_o*(1.-y_o)*exp(x_o-y_o); if ( site_x_o==0 || site_x_o==N-1 || site_y_o==0 || site_y_o==N-1 ) { field_odd[idx] = field_analytic[idx_site_o]; rho[idx_site_o] = 0.0; rho_odd[idx] = 0.0; } else { field_odd[idx] = 0.0; rho[idx_site_o]= 2.*x_o*(y_o-1)*(y_o-2.*x_o+x_o*y_o+2)*exp(x_o-y_o); rho_odd[idx] = 
rho[idx_site_o]; // field_odd[idx] = field_analytic[idx_site_o]; } // construct neighbors for even sites int site_x = idx%(N/2); int site_y = idx/(N/2); if ( (idx>N/2-1)&&(idx<(N*N)/2-N/2)) { if (site_y%2==0) { if (site_x!=0) { int L = site_x-1 + site_y*(N/2); int R = idx; int D = site_x + (site_y-1)*(N/2); int U = site_x + (site_y+1)*(N/2); L_e[idx] = L; R_e[idx] = R; D_e[idx] = D; U_e[idx] = U; // printf("%d\t%d\t%d\t%d\t%d\n", idx, L, R, U, D); } if (site_x!=(N/2)-1) { int L = idx; int R = site_x +1 + site_y*(N/2); int D = site_x + (site_y-1)*(N/2); int U = site_x + (site_y+1)*(N/2); L_o[idx] = L; R_o[idx] = R; D_o[idx] = D; U_o[idx] = U; // printf("%d\t%d\t%d\t%d\t%d\n", idx, L, R, U, D); } } else { if (site_x!=(N/2)-1) { int L = idx; int R = site_x+1 + site_y*(N/2); int D = site_x + (site_y-1)*(N/2); int U = site_x + (site_y_e+1)*(N/2); L_e[idx] = L; R_e[idx] = R; D_e[idx] = D; U_e[idx] = U; // printf("%d\t%d\t%d\t%d\t%d\n", idx, L, R, U, D); } if (site_x!=0) { int L = site_x-1 + site_y*(N/2); int R = idx; int D = site_x + (site_y-1)*(N/2); int U = site_x + (site_y+1)*(N/2); L_o[idx] = L; R_o[idx] = R; D_o[idx] = D; U_o[idx] = U; // printf("%d\t%d\t%d\t%d\t%d\n", idx, L, R, U, D); } } } else { L_e[idx] = 0; R_e[idx] = 0; U_e[idx] = 0; D_e[idx] = 0; L_o[idx] = 0; R_o[idx] = 0; U_o[idx] = 0; D_o[idx] = 0; } } __global__ void SOR_SOLVER_EVEN(int N, double dx, double omega, int* L_e, int* R_e, int* D_e, int* U_e, double* field_even, double* field_odd, double* rho_even) { int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N/2; int site_x = idx%(N/2); int site_y = idx/(N/2); if ( (idx>N/2-1)&&(idx<(N*N)/2-N/2)) { if (site_y%2==0) { if (site_x!=0) { field_even[idx] += 0.25*omega*( field_odd[L_e[idx]] + field_odd[R_e[idx]] + field_odd[U_e[idx]] + field_odd[D_e[idx]] - dx*dx*rho_even[idx] - 4.*field_even[idx]); } } else { if (site_x!=(N/2)-1) { field_even[idx] += 0.25*omega*( field_odd[L_e[idx]] + 
field_odd[R_e[idx]] + field_odd[U_e[idx]] + field_odd[D_e[idx]] - dx*dx*rho_even[idx] - 4.*field_even[idx]); } } } __syncthreads(); } __global__ void SOR_SOLVER_ODD(int N, double dx, double omega, int* L_o, int* R_o, int* D_o, int* U_o, double* field_even, double* field_odd, double* rho_odd) { int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N/2; int site_x = idx%(N/2); int site_y = idx/(N/2); if ( (idx>N/2-1)&&(idx<(N*N)/2-N/2)) { if (site_y%2==0) { if (site_x!=(N/2)-1) { field_odd[idx] += 0.25*omega*( field_even[L_o[idx]] + field_even[R_o[idx]] + field_even[U_o[idx]] + field_even[D_o[idx]] - dx*dx*rho_odd[idx] - 4.*field_odd[idx]); } } else { if (site_x!=0) { field_odd[idx] += 0.25*omega*( field_even[L_o[idx]] + field_even[R_o[idx]] + field_even[U_o[idx]] + field_even[D_o[idx]] - dx*dx*rho_odd[idx] - 4.*field_odd[idx]); } } } __syncthreads(); } __global__ void ERROR(int N, double dx, int* L_e, int* L_o, int* R_e, int* R_o, int* D_e, int* D_o, int* U_e, int* U_o, double* rho_even, double* rho_odd, double* field_even, double* field_odd, double *error_block) { extern __shared__ double sm[]; int idx_x = threadIdx.x + blockIdx.x*blockDim.x; int idx_y = threadIdx.y + blockIdx.y*blockDim.y; int idx = idx_x + idx_y*N/2; int idx_sm = threadIdx.x + blockDim.x*threadIdx.y; sm[idx_sm] = 0.0; int site_x = idx%(N/2); int site_y = idx/(N/2); if ( (idx>N/2-1)&&(idx<(N*N)/2-N/2)) { if (site_y%2==0) { if (site_x!=0) sm[idx_sm] += pow((field_odd[L_e[idx]]+field_odd[R_e[idx]]+field_odd[D_e[idx]]+field_odd[U_e[idx]]-4.0*field_even[idx])/dx/dx-rho_even[idx], 2.0); if (site_x!=(N/2)-1) sm[idx_sm] += pow((field_even[L_o[idx]]+field_even[R_o[idx]]+field_even[D_o[idx]]+field_even[U_o[idx]]-4.0*field_odd[idx])/dx/dx-rho_odd[idx], 2.0); } else { if (site_x!=(N/2)-1) sm[idx_sm] += pow((field_odd[L_e[idx]]+field_odd[R_e[idx]]+field_odd[D_e[idx]]+field_odd[U_e[idx]]-4.0*field_even[idx])/dx/dx-rho_even[idx], 2.0); if 
(site_x!=0) sm[idx_sm] += pow((field_even[L_o[idx]]+field_even[R_o[idx]]+field_even[D_o[idx]]+field_even[U_o[idx]]-4.0*field_odd[idx])/dx/dx-rho_odd[idx], 2.0); } } __syncthreads(); for (int shift=blockDim.x*blockDim.y/2; shift>0; shift/=2) { if (idx_sm<shift) sm[idx_sm] += sm[idx_sm+shift]; __syncthreads(); } if (idx_sm==0) error_block[blockIdx.x+blockIdx.y*gridDim.x] = sm[0]; } int main(void) { int N, N_threads, N_block, display_interval, tpb_x, tpb_y, bpg_x, bpg_y; // int N, N_threads, display_interval, tpb, bpg; float preparation_time, computation_time, total_time; double omega, dx, criteria; long iter, iter_max; double *field_even, *field_odd, *rho_even, *rho_odd, *field_final, *field_analytic, *rho, *error_block; int **neighbor_even, **neighbor_odd; size_t size_lattice, size_sm; cudaEvent_t start, stop; FILE* output_field, *output_rho; printf("Solve the Poission problem using SOR by OpenMP.\n\n"); printf("Enter the latttice size (N,N) (N must be divisible by 2)."); scanf("%d", &N); printf("The lattice size is (%d,%d).\n", N, N); printf("Set the value of omega.\n"); scanf("%lf",&omega); printf("The value of omega is %.4f .\n", omega); printf("Set the maximum iteration times.\n"); scanf("%ld", &iter_max); printf("The maximum iteration times is %ld .\n", iter_max); printf("Set the stopping criteria.\n"); scanf("%lf", &criteria); printf("The stopping criteria is %.4e .\n", criteria); printf("Set the display interval during iterations.\n"); scanf("%d", &display_interval); printf("The display interval is set to be %d .\n", display_interval); // printf("Set the number of OpenMP threads.\n"); // scanf("%d", &N_threads); // printf("The number of OpenMP threads is %d .\n", N_threads); printf("Set the GPU threads per block (tx,ty). (N/2 must be divisible by tx and N must be divisible by N)\n"); scanf("%d %d", &tpb_x, &tpb_y); if ((N/2)%tpb_x!=0) { printf("N/2 is not divisible by tx! 
Exit!\n"); return EXIT_FAILURE; } else if (N%tpb_y!=0) { printf("N is not divisible by ty! Exit!\n"); return EXIT_FAILURE; } else { printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y); printf("The block per grid will be set automatically."); bpg_x = (N/2)/tpb_x; bpg_y = N/tpb_y; printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y); } printf("Set the number of OpenMP threads.\n"); scanf("%d", &N_threads); printf("The number of OpenMP threads is %d.\n", N_threads); printf("\n"); printf("Start Preparation...\n"); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); dx = 1./(N-1); N_block = (N/2)/tpb_x*(N/tpb_y); size_lattice = N*N*sizeof(double); size_sm = tpb_x*tpb_y*sizeof(double); field_final = (double*)malloc(N*N*sizeof(double)); neighbor_even = (int**)malloc(4*sizeof(int*)); neighbor_odd = (int**)malloc(4*sizeof(int*)); output_field = fopen("analytical_field_distribution_allocate.txt","w"); output_rho = fopen("charge_distribution_allocate.txt","w"); cudaSetDevice(0); dim3 tpb(tpb_x,tpb_y); dim3 bpg(bpg_x,bpg_y); cublasMath_t mode = CUBLAS_TENSOR_OP_MATH; cublasPointerMode_t mode_pt = CUBLAS_POINTER_MODE_HOST; cublasHandle_t handle; cublasCreate(&handle); cublasSetMathMode(handle, mode); cublasSetPointerMode(handle, mode_pt); cudaMallocManaged(&field_even, size_lattice/2); cudaMallocManaged(&field_odd, size_lattice/2); cudaMallocManaged(&field_analytic, size_lattice); cudaMallocManaged(&rho_even, size_lattice/2); cudaMallocManaged(&rho_odd, size_lattice/2); cudaMallocManaged(&rho, size_lattice); cudaMallocManaged(&error_block, N_block*sizeof(double)); // construct neighbor index for (int i=0; i<4; i++) { cudaMallocManaged(&neighbor_even[i], (N*N)/2*sizeof(int)); cudaMallocManaged(&neighbor_odd[i], (N*N)/2*sizeof(int)); } // INITIALIZE<<<bpg,tpb,size_sm>>>(N, dx, neighbor_even[0], neighbor_odd[0], neighbor_even[1], neighbor_odd[1], neighbor_even[2],neighbor_odd[2], neighbor_even[3], neighbor_odd[3], rho_even, rho_odd, 
rho, field_even, field_odd, field_analytic); ERROR<<<bpg,tpb,size_sm>>>(N, dx, neighbor_even[0], neighbor_odd[0], neighbor_even[1], neighbor_odd[1], neighbor_even[2], neighbor_odd[2], neighbor_even[3], neighbor_odd[3], rho_even, rho_odd, field_even, field_odd, error_block); cudaDeviceSynchronize(); double norm; cublasDdot(handle, N*N, rho, 1, rho, 1, &norm); norm = sqrt(norm); printf("Norm = %.4e\n", norm); // debug // for (int j=0; j<N; j++) // { // for (int i=0; i<N/2; i++) // printf("%d\t",neighbor_odd[3][i+N/2*j]); // printf("\n"); // } // FPRINTF(output_field, N, field_analytic); FPRINTF(output_rho, N, rho); printf("Preparation ends.\n"); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&preparation_time, start, stop); printf("Total preparation time is %.4f ms.\n\n", preparation_time); cudaEventRecord(start,0); double error = EVALUATE_ERROR(N, N_block, norm, error_block); printf("Starts computation with error = %.8e...\n", error); iter = 0; while (error>criteria&&iter<iter_max) { SOR_SOLVER_EVEN<<<bpg,tpb>>>(N, dx, omega, neighbor_even[0], neighbor_even[1], neighbor_even[2], neighbor_even[3], field_even, field_odd, rho_even); // cudaDeviceSynchronize(); SOR_SOLVER_ODD<<<bpg,tpb>>>(N, dx, omega, neighbor_odd[0], neighbor_odd[1], neighbor_odd[2], neighbor_odd[3], field_even, field_odd, rho_odd); // cudaDeviceSynchronize(); ERROR<<<bpg,tpb,size_sm>>>(N, dx, neighbor_even[0], neighbor_odd[0], neighbor_even[1], neighbor_odd[1], neighbor_even[2], neighbor_odd[2], neighbor_even[3], neighbor_odd[3], rho_even, rho_odd, field_even, field_odd, error_block); cudaDeviceSynchronize(); error = EVALUATE_ERROR(N, N_block, norm, error_block); iter += 1; if (iter%display_interval==0) printf("Iteration = %ld , error = %.8e .\n", iter, error); } omp_set_num_threads(N_threads); # pragma omp parallel for for (int i_E=0; i_E<(N*N)/2; i_E++) { int ix = (2*i_E)%N; int iy = (2*i_E)/N; int parity = (ix+iy)%2; ix += parity; field_final[ix+iy*N] = field_even[i_E]; 
} # pragma omp parallel for for (int i_O=0; i_O<(N*N)/2; i_O++) { int ix = (2*i_O)%N; int iy = (2*i_O)/N; int parity = (ix+iy+1)%2; ix += parity; field_final[ix+iy*N] = field_odd[i_O]; } output_field = fopen("simulated_field_distribution_GPU_allocate.txt","w"); FPRINTF(output_field, N, field_final); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&computation_time, start, stop); printf("Computation time is %.4f ms.\n", computation_time); total_time = preparation_time + computation_time; printf("Total iteration is %ld ; total time is %.4f ms.\n", iter, total_time); free(field_final); free(neighbor_even); free(neighbor_odd); cudaFree(field_even); cudaFree(field_odd); cudaFree(field_analytic); cudaFree(rho_even); cudaFree(rho_odd); cudaFree(rho); cudaFree(error_block); fclose(output_field); fclose(output_rho); return EXIT_SUCCESS; } double EVALUATE_ERROR(int N, int N_block, double norm, double* error_block) { double error = 0.0; for (int i=0; i<N_block; i++) error += error_block[i]; return sqrt(error)/norm; } //void LAPLACIAN_SOR(int N, double dx, double omega, double* field_even, double* field_odd, double *rho_even, double *rho_odd, int **neighbor_even, int **neighbor_odd) //{ //# pragma omp parallel for // for (int i_E=N/2; i_E<(N*N)/2-N/2; i_E++) // { // int i_x = i_E%(N/2); // int i_y = i_E/(N/2); // if (i_y%2==0) // { // if (i_x!=0) // field_even[i_E] += 0.25*omega*( field_odd[neighbor_even[0][i_E]] + field_odd[neighbor_even[1][i_E]] + field_odd[neighbor_even[2][i_E]] + field_odd[neighbor_even[3][i_E]] - dx*dx*rho_even[i_E] - 4.*field_even[i_E]); // } // else // { // if (i_x!=(N/2)-1) // field_even[i_E] += 0.25*omega*( field_odd[neighbor_even[0][i_E]] + field_odd[neighbor_even[1][i_E]] + field_odd[neighbor_even[2][i_E]] + field_odd[neighbor_even[3][i_E]] - dx*dx*rho_even[i_E] - 4.*field_even[i_E]); // } // } //# pragma omp parallel for // for (int i_O=N/2; i_O<(N*N)/2-N/2; i_O++) // { // int i_x = i_O%(N/2); // int i_y = i_O/(N/2); // if 
(i_y%2==0) // { // if (i_x!=(N/2)-1) // field_odd[i_O] += 0.25*omega*( field_even[neighbor_odd[0][i_O]] + field_even[neighbor_odd[1][i_O]] + field_even[neighbor_odd[2][i_O]] + field_even[neighbor_odd[3][i_O]] - dx*dx*rho_odd[i_O] - 4.*field_odd[i_O]); // } // else // { // if (i_x!=0) // field_odd[i_O] += 0.25*omega*( field_even[neighbor_odd[0][i_O]] + field_even[neighbor_odd[1][i_O]] + field_even[neighbor_odd[2][i_O]] + field_even[neighbor_odd[3][i_O]] - dx*dx*rho_odd[i_O] - 4.*field_odd[i_O]); // } // } //} void FPRINTF(FILE *output_file, int N, double *array) { for (int j=0; j<N; j++) { for (int i=0; i<N; i++) fprintf(output_file, "%.8e\t", array[i+j*N]); fprintf(output_file, "\n"); } }
379f2d79cfc8b93e9e3f054f0f55de5f7e124854.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017 XGBoost contributors */ // GPU implementation of objective function. // Necessary to avoid extra copying of data to CPU. #include <dmlc/omp.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "../common/span.h" #include "../common/device_helpers.cuh" #include "../common/host_device_vector.h" #include "./regression_loss.h" namespace xgboost { namespace obj { using dh::DVec; DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); struct GPURegLossParam : public dmlc::Parameter<GPURegLossParam> { float scale_pos_weight; int n_gpus; int gpu_id; // declare parameters DMLC_DECLARE_PARAMETER(GPURegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); DMLC_DECLARE_FIELD(n_gpus).set_default(1).set_lower_bound(-1) .describe("Number of GPUs to use for multi-gpu algorithms (NOT IMPLEMENTED)"); DMLC_DECLARE_FIELD(gpu_id) .set_lower_bound(0) .set_default(0) .describe("gpu to use for objective function evaluation"); } }; // GPU kernel for gradient computation template<typename Loss> __global__ void get_gradient_k (common::Span<GradientPair> out_gpair, common::Span<int> label_correct, common::Span<const float> preds, common::Span<const float> labels, const float * __restrict__ weights, int n, float scale_pos_weight) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n) return; float p = Loss::PredTransform(preds[i]); float w = weights == nullptr ? 
1.0f : weights[i]; float label = labels[i]; if (label == 1.0f) w *= scale_pos_weight; if (!Loss::CheckLabel(label)) atomicAnd(label_correct.data(), 0); out_gpair[i] = GradientPair (Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } // GPU kernel for predicate transformation template<typename Loss> __global__ void pred_transform_k(common::Span<float> preds, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n) return; preds[i] = Loss::PredTransform(preds[i]); } // regression loss function for evaluation on GPU (eventually) template<typename Loss> class GPURegLossObj : public ObjFunction { protected: HostDeviceVector<int> label_correct_; // allocate device data for n elements, do nothing if memory is allocated already void LazyResize() { } public: GPURegLossObj() {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.InitAllowUnknown(args); CHECK(param_.n_gpus != 0) << "Must have at least one device"; devices_ = GPUSet::All(param_.n_gpus).Normalised(param_.gpu_id); label_correct_.Reshard(devices_); label_correct_.Resize(devices_.Size()); } void GetGradient(const HostDeviceVector<float> &preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided" << "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size(); size_t ndata = preds.Size(); preds.Reshard(devices_); info.labels_.Reshard(devices_); info.weights_.Reshard(devices_); out_gpair->Reshard(devices_); out_gpair->Resize(ndata); GetGradientDevice(preds, info, iter, out_gpair); } private: void GetGradientDevice(const HostDeviceVector<float>& preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) { label_correct_.Fill(1); // run the kernel #pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1) for 
(int i = 0; i < devices_.Size(); ++i) { int d = devices_[i]; dh::safe_cuda(hipSetDevice(d)); const int block = 256; size_t n = preds.DeviceSize(d); if (n > 0) { hipLaunchKernelGGL(( get_gradient_k<Loss>), dim3(dh::DivRoundUp(n, block)), dim3(block), 0, 0, out_gpair->DeviceSpan(d), label_correct_.DeviceSpan(d), preds.DeviceSpan(d), info.labels_.DeviceSpan(d), info.weights_.Size() > 0 ? info.weights_.DevicePointer(d) : nullptr, n, param_.scale_pos_weight); dh::safe_cuda(hipGetLastError()); } dh::safe_cuda(hipDeviceSynchronize()); } // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (int i = 0; i < devices_.Size(); ++i) { if (label_correct_h[i] == 0) LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) override { io_preds->Reshard(devices_); size_t ndata = io_preds->Size(); PredTransformDevice(io_preds); } void PredTransformDevice(HostDeviceVector<float>* preds) { #pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1) for (int i = 0; i < devices_.Size(); ++i) { int d = devices_[i]; dh::safe_cuda(hipSetDevice(d)); const int block = 256; size_t n = preds->DeviceSize(d); if (n > 0) { hipLaunchKernelGGL(( pred_transform_k<Loss>), dim3(dh::DivRoundUp(n, block)), dim3(block), 0, 0, preds->DeviceSpan(d), n); dh::safe_cuda(hipGetLastError()); } dh::safe_cuda(hipDeviceSynchronize()); } } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } protected: GPURegLossParam param_; GPUSet devices_; }; // register the objective functions DMLC_REGISTER_PARAMETER(GPURegLossParam); XGBOOST_REGISTER_OBJECTIVE(GPULinearRegression, "gpu:reg:linear") .describe("Linear regression (computed on GPU).") .set_body([]() { return new GPURegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticRegression, "gpu:reg:logistic") 
.describe("Logistic regression for probability regression task (computed on GPU).") .set_body([]() { return new GPURegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticClassification, "gpu:binary:logistic") .describe("Logistic regression for binary classification task (computed on GPU).") .set_body([]() { return new GPURegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticRaw, "gpu:binary:logitraw") .describe("Logistic regression for classification, output score " "before logistic transformation (computed on GPU)") .set_body([]() { return new GPURegLossObj<LogisticRaw>(); }); } // namespace obj } // namespace xgboost
379f2d79cfc8b93e9e3f054f0f55de5f7e124854.cu
/*! * Copyright 2017 XGBoost contributors */ // GPU implementation of objective function. // Necessary to avoid extra copying of data to CPU. #include <dmlc/omp.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "../common/span.h" #include "../common/device_helpers.cuh" #include "../common/host_device_vector.h" #include "./regression_loss.h" namespace xgboost { namespace obj { using dh::DVec; DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); struct GPURegLossParam : public dmlc::Parameter<GPURegLossParam> { float scale_pos_weight; int n_gpus; int gpu_id; // declare parameters DMLC_DECLARE_PARAMETER(GPURegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); DMLC_DECLARE_FIELD(n_gpus).set_default(1).set_lower_bound(-1) .describe("Number of GPUs to use for multi-gpu algorithms (NOT IMPLEMENTED)"); DMLC_DECLARE_FIELD(gpu_id) .set_lower_bound(0) .set_default(0) .describe("gpu to use for objective function evaluation"); } }; // GPU kernel for gradient computation template<typename Loss> __global__ void get_gradient_k (common::Span<GradientPair> out_gpair, common::Span<int> label_correct, common::Span<const float> preds, common::Span<const float> labels, const float * __restrict__ weights, int n, float scale_pos_weight) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n) return; float p = Loss::PredTransform(preds[i]); float w = weights == nullptr ? 
1.0f : weights[i]; float label = labels[i]; if (label == 1.0f) w *= scale_pos_weight; if (!Loss::CheckLabel(label)) atomicAnd(label_correct.data(), 0); out_gpair[i] = GradientPair (Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } // GPU kernel for predicate transformation template<typename Loss> __global__ void pred_transform_k(common::Span<float> preds, int n) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i >= n) return; preds[i] = Loss::PredTransform(preds[i]); } // regression loss function for evaluation on GPU (eventually) template<typename Loss> class GPURegLossObj : public ObjFunction { protected: HostDeviceVector<int> label_correct_; // allocate device data for n elements, do nothing if memory is allocated already void LazyResize() { } public: GPURegLossObj() {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.InitAllowUnknown(args); CHECK(param_.n_gpus != 0) << "Must have at least one device"; devices_ = GPUSet::All(param_.n_gpus).Normalised(param_.gpu_id); label_correct_.Reshard(devices_); label_correct_.Resize(devices_.Size()); } void GetGradient(const HostDeviceVector<float> &preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels_.Size()) << "labels are not correctly provided" << "preds.size=" << preds.Size() << ", label.size=" << info.labels_.Size(); size_t ndata = preds.Size(); preds.Reshard(devices_); info.labels_.Reshard(devices_); info.weights_.Reshard(devices_); out_gpair->Reshard(devices_); out_gpair->Resize(ndata); GetGradientDevice(preds, info, iter, out_gpair); } private: void GetGradientDevice(const HostDeviceVector<float>& preds, const MetaInfo &info, int iter, HostDeviceVector<GradientPair>* out_gpair) { label_correct_.Fill(1); // run the kernel #pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1) for 
(int i = 0; i < devices_.Size(); ++i) { int d = devices_[i]; dh::safe_cuda(cudaSetDevice(d)); const int block = 256; size_t n = preds.DeviceSize(d); if (n > 0) { get_gradient_k<Loss><<<dh::DivRoundUp(n, block), block>>> (out_gpair->DeviceSpan(d), label_correct_.DeviceSpan(d), preds.DeviceSpan(d), info.labels_.DeviceSpan(d), info.weights_.Size() > 0 ? info.weights_.DevicePointer(d) : nullptr, n, param_.scale_pos_weight); dh::safe_cuda(cudaGetLastError()); } dh::safe_cuda(cudaDeviceSynchronize()); } // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (int i = 0; i < devices_.Size(); ++i) { if (label_correct_h[i] == 0) LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) override { io_preds->Reshard(devices_); size_t ndata = io_preds->Size(); PredTransformDevice(io_preds); } void PredTransformDevice(HostDeviceVector<float>* preds) { #pragma omp parallel for schedule(static, 1) if (devices_.Size() > 1) for (int i = 0; i < devices_.Size(); ++i) { int d = devices_[i]; dh::safe_cuda(cudaSetDevice(d)); const int block = 256; size_t n = preds->DeviceSize(d); if (n > 0) { pred_transform_k<Loss><<<dh::DivRoundUp(n, block), block>>>( preds->DeviceSpan(d), n); dh::safe_cuda(cudaGetLastError()); } dh::safe_cuda(cudaDeviceSynchronize()); } } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } protected: GPURegLossParam param_; GPUSet devices_; }; // register the objective functions DMLC_REGISTER_PARAMETER(GPURegLossParam); XGBOOST_REGISTER_OBJECTIVE(GPULinearRegression, "gpu:reg:linear") .describe("Linear regression (computed on GPU).") .set_body([]() { return new GPURegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticRegression, "gpu:reg:logistic") .describe("Logistic regression for probability regression task (computed on 
GPU).") .set_body([]() { return new GPURegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticClassification, "gpu:binary:logistic") .describe("Logistic regression for binary classification task (computed on GPU).") .set_body([]() { return new GPURegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(GPULogisticRaw, "gpu:binary:logitraw") .describe("Logistic regression for classification, output score " "before logistic transformation (computed on GPU)") .set_body([]() { return new GPURegLossObj<LogisticRaw>(); }); } // namespace obj } // namespace xgboost
c74ee0d4760ffc67a5401210aca1e0bdf13ec07c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "../highPerformanceTimer/highPerformanceTimer.h" #include <iostream> #include <stdlib.h> #include <time.h> #include <omp.h> //#include <string> //use alt+b, u to build only this project using namespace std; typedef int ourVar_t; void CPUTest(); bool allocCPUMemory(ourVar_t** a, ourVar_t** b, ourVar_t** c, int size, int size_of_var = sizeof(ourVar_t)); void freeCPUMemory(ourVar_t* a, ourVar_t* b, ourVar_t* c); //double fillArrays(ourVar_t* a, ourVar_t* b, ourVar_t* c, int size_of_array, int iterations); void fillArray(ourVar_t* a, int size_of_array); void fill_C_Array(ourVar_t* a, int size_of_array); void printArrays(ourVar_t* a, ourVar_t* b, ourVar_t* c, int size_of_array); hipError_t GPUTest(); __global__ void addKernel(ourVar_t *c, ourVar_t *a, ourVar_t *b) { int i = blockIdx.x * blockDim.x + threadIdx.x; //int i = threadIdx.x; c[i] = a[i] + b[i]; } int size_of_array = 5; int iterations = 100; const int max_num = 15; int main(int argc, char* argv[]) { cout << endl; srand(time(NULL)); //addKernel, The grid size, the block size, the vecs to add // addKernel<<<1, size>>>(dev_c, dev_a, dev_b); //int i = blockIDx.x * blockDimx.x + threadIdx.x; try { //if there is a command line argument, set the ouput variable to it if (argc > 1) { size_of_array = atoi(argv[1]); cout << "The size of the array is " << size_of_array << endl; if (argc > 2) { iterations = atoi(argv[2]); cout << "The number of iterations is " << iterations << endl; } else { cout << "The number of iterations is 100" << endl; } } else { cout << "No Commmand Line Argument: Size of Array Defaulting to 5, Number of Iterations deafulting to 100" << endl; } cout << endl; CPUTest(); GPUTest(); } catch (char * err) { cerr << err << endl; } #ifdef _WIN32 || _WIN64 //system("pause"); #endif return 0; } void CPUTest() { //press alt+shift+(arrow key) to vertical edit ourVar_t* a = 
nullptr; ourVar_t* b = nullptr; ourVar_t* c = nullptr; HighPrecisionTime full; HighPrecisionTime avg; HighPrecisionTime summing; double fullTime = 0.0; double avgTime = 0.0; double sumTime = 0.0; if (!allocCPUMemory(&a, &b, &c, size_of_array)) { throw("Error Allocating Memory"); } avg.TimeSinceLastCall(); #pragma omp parallel for for (int i = 0; i < iterations; i++) { fillArray(a, size_of_array); fillArray(b, size_of_array); fill_C_Array(c, size_of_array); avgTime += avg.TimeSinceLastCall(); } full.TimeSinceLastCall(); summing.TimeSinceLastCall(); for (int times = 0; times < iterations; times++) { for (int i = 0; i < size_of_array; i++) { c[i] = a[i] + b[i]; } sumTime += summing.TimeSinceLastCall(); } //printArrays(a, b, c, size_of_array); freeCPUMemory(a, b, c); fullTime = full.TimeSinceLastCall(); //average here avgTime = avgTime / iterations; sumTime = sumTime / iterations; cout << "The average time for the CPU to fill all three vectors was: " << avgTime << endl; cout << " ------------------------ " << endl << endl; cout << "The average time it took the CPU to sum the vectors was: " << sumTime << endl; cout << "The full CPU run (summing + freeing) was: " << fullTime << endl; cout << endl; } bool allocCPUMemory(ourVar_t** a, ourVar_t** b, ourVar_t** c, int size, int size_of_var) { bool retVal = true; int memSize = size * size_of_var; *a = (ourVar_t*)malloc(memSize); *b = (ourVar_t*)malloc(memSize); *c = (ourVar_t*)malloc(memSize); if (*a == nullptr || *b == nullptr || *c == nullptr) { retVal = false; } return retVal; } void freeCPUMemory(ourVar_t* a, ourVar_t* b, ourVar_t* c) { if (a != nullptr) { free(a); } if (b != nullptr) { free(b); } if (c != nullptr) { free(c); } } void fillArray(ourVar_t* a, int size_of_array) { ourVar_t* pa = a; while (pa < &a[size_of_array]) { *pa = rand() % max_num; ++pa; } } void fill_C_Array(ourVar_t* a, int size_of_array) { ourVar_t* pa = a; while (pa < &a[size_of_array]) { *pa = 0; ++pa; } } void printArrays(ourVar_t* a, ourVar_t* 
b, ourVar_t* c, int size_of_array) { for (int i = 0; i < size_of_array; i++) { cout << a[i] << " : " << b[i] << " : " << c[i] << endl; } } hipError_t GPUTest() { ourVar_t* a = nullptr; ourVar_t* b = nullptr; ourVar_t* c = nullptr; if (!allocCPUMemory(&a, &b, &c, size_of_array)) { throw("Error Allocating Memory"); } #pragma omp parallel for for (int i = 0; i < iterations; i++) { fillArray(a, size_of_array); fillArray(b, size_of_array); fill_C_Array(c, size_of_array); } HighPrecisionTime full; HighPrecisionTime summing; double fullTime = 0.0; double sumTime = 0.0; int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); int maxThreadsPerBlock = prop.maxThreadsPerBlock; int numberOfBlocks = size_of_array / maxThreadsPerBlock + 1; full.TimeSinceLastCall(); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { throw("hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size_of_array * sizeof(ourVar_t)); if (cudaStatus != hipSuccess) { throw("hipMalloc-1 failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size_of_array * sizeof(ourVar_t)); if (cudaStatus != hipSuccess) { throw("hipMalloc-2 failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size_of_array * sizeof(ourVar_t)); if (cudaStatus != hipSuccess) { throw("hipMalloc-3 failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_a, a, size_of_array * sizeof(ourVar_t), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { throw("hipMemcpy-1 failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size_of_array * sizeof(ourVar_t), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { throw("hipMemcpy-2 failed!"); goto Error; } summing.TimeSinceLastCall(); // Launch a kernel on the GPU with one thread for each element. //addKernel, The grid size, the block size, the vecs to add for (int i = 0; i < iterations; i++) { hipLaunchKernelGGL(( addKernel) , dim3(numberOfBlocks), dim3(maxThreadsPerBlock) , 0, 0, dev_c, dev_a, dev_b); sumTime += summing.TimeSinceLastCall(); } // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size_of_array * sizeof(ourVar_t), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { throw("hipMemcpy-3 failed!"); goto Error; } fullTime = full.TimeSinceLastCall(); sumTime = sumTime / iterations; cout << "The average time it took the GPU to sum the vectors was: " << sumTime << endl; cout << "The full GPU run (allocating + copying + summing) was: " << fullTime << endl; Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
c74ee0d4760ffc67a5401210aca1e0bdf13ec07c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "../highPerformanceTimer/highPerformanceTimer.h" #include <iostream> #include <stdlib.h> #include <time.h> #include <omp.h> //#include <string> //use alt+b, u to build only this project using namespace std; typedef int ourVar_t; void CPUTest(); bool allocCPUMemory(ourVar_t** a, ourVar_t** b, ourVar_t** c, int size, int size_of_var = sizeof(ourVar_t)); void freeCPUMemory(ourVar_t* a, ourVar_t* b, ourVar_t* c); //double fillArrays(ourVar_t* a, ourVar_t* b, ourVar_t* c, int size_of_array, int iterations); void fillArray(ourVar_t* a, int size_of_array); void fill_C_Array(ourVar_t* a, int size_of_array); void printArrays(ourVar_t* a, ourVar_t* b, ourVar_t* c, int size_of_array); cudaError_t GPUTest(); __global__ void addKernel(ourVar_t *c, ourVar_t *a, ourVar_t *b) { int i = blockIdx.x * blockDim.x + threadIdx.x; //int i = threadIdx.x; c[i] = a[i] + b[i]; } int size_of_array = 5; int iterations = 100; const int max_num = 15; int main(int argc, char* argv[]) { cout << endl; srand(time(NULL)); //addKernel, The grid size, the block size, the vecs to add // addKernel<<<1, size>>>(dev_c, dev_a, dev_b); //int i = blockIDx.x * blockDimx.x + threadIdx.x; try { //if there is a command line argument, set the ouput variable to it if (argc > 1) { size_of_array = atoi(argv[1]); cout << "The size of the array is " << size_of_array << endl; if (argc > 2) { iterations = atoi(argv[2]); cout << "The number of iterations is " << iterations << endl; } else { cout << "The number of iterations is 100" << endl; } } else { cout << "No Commmand Line Argument: Size of Array Defaulting to 5, Number of Iterations deafulting to 100" << endl; } cout << endl; CPUTest(); GPUTest(); } catch (char * err) { cerr << err << endl; } #ifdef _WIN32 || _WIN64 //system("pause"); #endif return 0; } void CPUTest() { //press alt+shift+(arrow key) to vertical edit ourVar_t* a = nullptr; ourVar_t* b = nullptr; ourVar_t* c = nullptr; 
HighPrecisionTime full; HighPrecisionTime avg; HighPrecisionTime summing; double fullTime = 0.0; double avgTime = 0.0; double sumTime = 0.0; if (!allocCPUMemory(&a, &b, &c, size_of_array)) { throw("Error Allocating Memory"); } avg.TimeSinceLastCall(); #pragma omp parallel for for (int i = 0; i < iterations; i++) { fillArray(a, size_of_array); fillArray(b, size_of_array); fill_C_Array(c, size_of_array); avgTime += avg.TimeSinceLastCall(); } full.TimeSinceLastCall(); summing.TimeSinceLastCall(); for (int times = 0; times < iterations; times++) { for (int i = 0; i < size_of_array; i++) { c[i] = a[i] + b[i]; } sumTime += summing.TimeSinceLastCall(); } //printArrays(a, b, c, size_of_array); freeCPUMemory(a, b, c); fullTime = full.TimeSinceLastCall(); //average here avgTime = avgTime / iterations; sumTime = sumTime / iterations; cout << "The average time for the CPU to fill all three vectors was: " << avgTime << endl; cout << " ------------------------ " << endl << endl; cout << "The average time it took the CPU to sum the vectors was: " << sumTime << endl; cout << "The full CPU run (summing + freeing) was: " << fullTime << endl; cout << endl; } bool allocCPUMemory(ourVar_t** a, ourVar_t** b, ourVar_t** c, int size, int size_of_var) { bool retVal = true; int memSize = size * size_of_var; *a = (ourVar_t*)malloc(memSize); *b = (ourVar_t*)malloc(memSize); *c = (ourVar_t*)malloc(memSize); if (*a == nullptr || *b == nullptr || *c == nullptr) { retVal = false; } return retVal; } void freeCPUMemory(ourVar_t* a, ourVar_t* b, ourVar_t* c) { if (a != nullptr) { free(a); } if (b != nullptr) { free(b); } if (c != nullptr) { free(c); } } void fillArray(ourVar_t* a, int size_of_array) { ourVar_t* pa = a; while (pa < &a[size_of_array]) { *pa = rand() % max_num; ++pa; } } void fill_C_Array(ourVar_t* a, int size_of_array) { ourVar_t* pa = a; while (pa < &a[size_of_array]) { *pa = 0; ++pa; } } void printArrays(ourVar_t* a, ourVar_t* b, ourVar_t* c, int size_of_array) { for (int i = 0; i < 
size_of_array; i++) { cout << a[i] << " : " << b[i] << " : " << c[i] << endl; } } cudaError_t GPUTest() { ourVar_t* a = nullptr; ourVar_t* b = nullptr; ourVar_t* c = nullptr; if (!allocCPUMemory(&a, &b, &c, size_of_array)) { throw("Error Allocating Memory"); } #pragma omp parallel for for (int i = 0; i < iterations; i++) { fillArray(a, size_of_array); fillArray(b, size_of_array); fill_C_Array(c, size_of_array); } HighPrecisionTime full; HighPrecisionTime summing; double fullTime = 0.0; double sumTime = 0.0; int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); int maxThreadsPerBlock = prop.maxThreadsPerBlock; int numberOfBlocks = size_of_array / maxThreadsPerBlock + 1; full.TimeSinceLastCall(); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { throw("cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size_of_array * sizeof(ourVar_t)); if (cudaStatus != cudaSuccess) { throw("cudaMalloc-1 failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size_of_array * sizeof(ourVar_t)); if (cudaStatus != cudaSuccess) { throw("cudaMalloc-2 failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size_of_array * sizeof(ourVar_t)); if (cudaStatus != cudaSuccess) { throw("cudaMalloc-3 failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_a, a, size_of_array * sizeof(ourVar_t), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { throw("cudaMemcpy-1 failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size_of_array * sizeof(ourVar_t), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { throw("cudaMemcpy-2 failed!"); goto Error; } summing.TimeSinceLastCall(); // Launch a kernel on the GPU with one thread for each element. //addKernel, The grid size, the block size, the vecs to add for (int i = 0; i < iterations; i++) { addKernel <<< numberOfBlocks, maxThreadsPerBlock >>> (dev_c, dev_a, dev_b); sumTime += summing.TimeSinceLastCall(); } // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size_of_array * sizeof(ourVar_t), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { throw("cudaMemcpy-3 failed!"); goto Error; } fullTime = full.TimeSinceLastCall(); sumTime = sumTime / iterations; cout << "The average time it took the GPU to sum the vectors was: " << sumTime << endl; cout << "The full GPU run (allocating + copying + summing) was: " << fullTime << endl; Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
66a52234caa88b2291c6a51227fd6ec72a97205b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// matmultKernel00.cu /// For CSU CS575 Spring 2011 /// Instructor: Wim Bohm /// Based on code from the CUDA Programming Guide /// Modified by Wim Bohm and David Newman /// Created: 2011-01-27 /// Last Modified: 2011-02-23 DVN /// /// Multiplies two matrices using CUDA: A x B = C /// /// Copy this file and modify the MatMultKernel device function for /// each of your experiments. /// #include "matmultKernel.h" // Define a gpu kernel to perform matrix multiplication // of A x B = C. __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){ // matrix blocks float *Asub, *Bsub, *Csub; // Putting these into registers speeds access. int thread_row = threadIdx.y; int thread_col = threadIdx.x; int block_row = blockIdx.y; int block_col = blockIdx.x; // Each THREAD BLOCK computes one sub matrix Csub of C // EACH THREAD creates its own matrix descriptor Csub Csub = &C.elements[C.stride * FOOTPRINT_SIZE * block_row + FOOTPRINT_SIZE * block_col]; // Each thread computes one element of Csub in its copy of CValue float Cvalue = 0; // Loop over all sub matrices in block_row of A and block_col of B // required to compute Csub. Block multiply each pair of sub matrices // and accumulate results for (int m = 0; m < (A.width / FOOTPRINT_SIZE); ++m){ // Get Asub and Bsub descriptors Asub = &A.elements[A.stride * FOOTPRINT_SIZE * block_row + FOOTPRINT_SIZE * m]; Bsub = &B.elements[B.stride * FOOTPRINT_SIZE * m + FOOTPRINT_SIZE * block_col]; // Copy ELEMENTS OF ASub and Bsub into shared memory // EACH THREAD loads ONE ELEMENT of ASub and ONE of Bsub // Notice: it does not need to be the element it requires to // compute its Cvalue, as long as all elements are // collaboratively read. 
// Notice: every thread declares shared_A and shared_B in shared memory // even though a thread block has only one shared_A and one shared_B __shared__ float shared_A[FOOTPRINT_SIZE][FOOTPRINT_SIZE]; __shared__ float shared_B[FOOTPRINT_SIZE][FOOTPRINT_SIZE]; {//do this for 4 different indecies // Each thread copies just 4 element of shared_A and one element of shared_B shared_A[thread_row][thread_col] = Asub[thread_row * A.stride + thread_col]; shared_B[thread_row][thread_col] = Bsub[thread_row * B.stride + thread_col]; } // Synchronize to ensure all elements are read __syncthreads(); // Do an inproduct of one row of shared_A and one col of shared_B // computing one Cvalue by accumulation #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col]; // Synchronize to ensure all Cvalues have been incremented // before reading in the next shared_A AND shared_B BLOCKS __syncthreads(); } // Write Csub to GLOBAL memory. // Each thread writes its own cell value. Csub[thread_row * C.stride + thread_col] = Cvalue; }
66a52234caa88b2291c6a51227fd6ec72a97205b.cu
/// /// matmultKernel00.cu /// For CSU CS575 Spring 2011 /// Instructor: Wim Bohm /// Based on code from the CUDA Programming Guide /// Modified by Wim Bohm and David Newman /// Created: 2011-01-27 /// Last Modified: 2011-02-23 DVN /// /// Multiplies two matrices using CUDA: A x B = C /// /// Copy this file and modify the MatMultKernel device function for /// each of your experiments. /// #include "matmultKernel.h" // Define a gpu kernel to perform matrix multiplication // of A x B = C. __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){ // matrix blocks float *Asub, *Bsub, *Csub; // Putting these into registers speeds access. int thread_row = threadIdx.y; int thread_col = threadIdx.x; int block_row = blockIdx.y; int block_col = blockIdx.x; // Each THREAD BLOCK computes one sub matrix Csub of C // EACH THREAD creates its own matrix descriptor Csub Csub = &C.elements[C.stride * FOOTPRINT_SIZE * block_row + FOOTPRINT_SIZE * block_col]; // Each thread computes one element of Csub in its copy of CValue float Cvalue = 0; // Loop over all sub matrices in block_row of A and block_col of B // required to compute Csub. Block multiply each pair of sub matrices // and accumulate results for (int m = 0; m < (A.width / FOOTPRINT_SIZE); ++m){ // Get Asub and Bsub descriptors Asub = &A.elements[A.stride * FOOTPRINT_SIZE * block_row + FOOTPRINT_SIZE * m]; Bsub = &B.elements[B.stride * FOOTPRINT_SIZE * m + FOOTPRINT_SIZE * block_col]; // Copy ELEMENTS OF ASub and Bsub into shared memory // EACH THREAD loads ONE ELEMENT of ASub and ONE of Bsub // Notice: it does not need to be the element it requires to // compute its Cvalue, as long as all elements are // collaboratively read. 
// Notice: every thread declares shared_A and shared_B in shared memory // even though a thread block has only one shared_A and one shared_B __shared__ float shared_A[FOOTPRINT_SIZE][FOOTPRINT_SIZE]; __shared__ float shared_B[FOOTPRINT_SIZE][FOOTPRINT_SIZE]; {//do this for 4 different indecies // Each thread copies just 4 element of shared_A and one element of shared_B shared_A[thread_row][thread_col] = Asub[thread_row * A.stride + thread_col]; shared_B[thread_row][thread_col] = Bsub[thread_row * B.stride + thread_col]; } // Synchronize to ensure all elements are read __syncthreads(); // Do an inproduct of one row of shared_A and one col of shared_B // computing one Cvalue by accumulation #pragma unroll for(int e=0; e<BLOCK_SIZE; ++e) Cvalue += shared_A[thread_row][e] * shared_B[e][thread_col]; // Synchronize to ensure all Cvalues have been incremented // before reading in the next shared_A AND shared_B BLOCKS __syncthreads(); } // Write Csub to GLOBAL memory. // Each thread writes its own cell value. Csub[thread_row * C.stride + thread_col] = Cvalue; }
0e8dceb2cd3a44ce10d72e50882ad2ab670af495.hip
// !!! This is a file automatically generated by hipify!!! /********* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of NVIDIA CORPORATION nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *********/ #include <iostream> #include "math.h" #include "stdlib.h" #include "grid_gpu.cuh" #include "Defines.h" #include "hip/hip_runtime.h" #ifdef __HDF5_INPUT #include <vector> #include "H5Cpp.h" #include "vis.h" using namespace H5; std::vector<struct vis> HDF5_to_struct(H5File* file); #endif //With managed memory, grid.cpp must be compiled as CUDA //in which case float2 and double2 are predefined. 
//typedef struct {float x,y;} float2; //typedef struct {double x,y;} double2; #define single 77 #if PRECISION==single #define PRECISION float #endif #if OUTPRECISION==single #define OUTPRECISION float #endif #ifndef PRECISION #define PRECISION double #endif #define PASTER(x) x ## 2 #define EVALUATOR(x) PASTER(x) #define PRECISION2 EVALUATOR(PRECISION) #ifndef OUTPRECISION #define OUTPRECISION PRECISION #endif #define OUTPRECISION2 EVALUATOR(OUTPRECISION) void init_gcf(PRECISION2 *gcf, size_t size) { for (size_t sub_x=0; sub_x<GCF_GRID; sub_x++ ) for (size_t sub_y=0; sub_y<GCF_GRID; sub_y++ ) for(size_t x=0; x<size; x++) for(size_t y=0; y<size; y++) { //Some nonsense GCF PRECISION tmp = sin(6.28*x/size/GCF_GRID)*exp(-(1.0*x*x+1.0*y*y*sub_y)/size/size/2); gcf[size*size*(sub_x+sub_y*GCF_GRID)+x+y*size].x = tmp*sin(1.0*x*sub_x/(y+1))+0.4; gcf[size*size*(sub_x+sub_y*GCF_GRID)+x+y*size].y = tmp*cos(1.0*x*sub_x/(y+1))-0.2; //std::cout << tmp << gcf[x+y*size].x << gcf[x+y*size].y << std::endl; } } void gridCPU(PRECISION2* out, PRECISION2 *in, PRECISION2 *in_vals, size_t npts, size_t img_dim, PRECISION2 *gcf, size_t gcf_dim) { //degrid on the CPU // out (out) - the output image // in (in) - the input locations // in_vals (in) - input values // npts (in) - number of locations // img_dim (in) - dimension of the image // gcf (in) - the gridding convolution function // gcf_dim (in) - dimension of the GCF //Zero the output for (size_t n=0;n<IMG_SIZE*IMG_SIZE; n++) out[n].x = out[n].y = 0.0; //offset gcf to point to the middle for cleaner code later gcf += GCF_DIM*(GCF_DIM-1)/2-1; double* out1 = (double*)out; double* out2 = (double*)(out+POLARIZATIONS*img_dim*img_dim/2); //#pragma acc parallel loop copyout(out[0:NPOINTS]) copyin(in[0:NPOINTS],gcf[0:GCF_GRID*GCF_GRID*GCF_DIM*GCF_DIM],img[IMG_SIZE*IMG_SIZE]) gang //#pragma omp parallel for for(size_t n=0; n<npts; n++) { //std::cout << "in = " << in[n].x << ", " << in[n].y << std::endl; int sub_x = 
floorf(GCF_GRID*(in[n].x-floorf(in[n].x))); int sub_y = floorf(GCF_GRID*(in[n].y-floorf(in[n].y))); //std::cout << "sub = " << sub_x << ", " << sub_y << std::endl; int main_x = floor(in[n].x); int main_y = floor(in[n].y); //std::cout << "main = " << main_x << ", " << main_y << std::endl; // #pragma acc parallel loop collapse(2) reduction(+:sum_r,sum_i) vector //#pragma omp parallel for collapse(2) reduction(+:sum_r, sum_i) for (int a=GCF_DIM/2; a>-GCF_DIM/2 ;a--) for (int b=GCF_DIM/2; b>-GCF_DIM/2 ;b--) { PRECISION r2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].x; PRECISION i2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].y; PRECISION r1, i1; r1 = in_vals[n].x; i1 = in_vals[n].y; if (main_x+a < 0 || main_y+b < 0 || main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) { } else { #ifdef DEBUG1 out1[main_x+a+IMG_SIZE*(main_y+b)] += 1; out2[main_x+a+IMG_SIZE*(main_y+b)] += n; #else out1[main_x+a+IMG_SIZE*(main_y+b)] += r1*r2-i1*i2; out2[main_x+a+IMG_SIZE*(main_y+b)] += r1*i2+r2*i1; #endif } } //std::cout << "val = " << out[n].r << "+ i" << out[n].i << std::endl; } gcf -= GCF_DIM*(GCF_DIM-1)/2-1; } void gridCPU_pz(OUTPRECISION2* out, PRECISION2 *in, PRECISION2 *in_vals, size_t npts, size_t img_dim, PRECISION2 *gcf, size_t gcf_dim) { //degrid on the CPU // out (out) - the output image // in (in) - the input locations // in_vals (in) - input values // npts (in) - number of locations // img_dim (in) - dimension of the image // gcf (in) - the gridding convolution function // gcf_dim (in) - dimension of the GCF //Zero the output //offset gcf to point to the middle for cleaner code later gcf += GCF_DIM*(GCF_DIM-1)/2-1; double* out1 = (double*)out; double* out2 = ((double*)out)+POLARIZATIONS*img_dim*img_dim; //#pragma acc parallel loop copyout(out[0:NPOINTS]) copyin(in[0:NPOINTS],gcf[0:GCF_GRID*GCF_GRID*GCF_DIM*GCF_DIM],img[IMG_SIZE*IMG_SIZE]) gang //#pragma omp parallel for for(size_t n=0; n<npts; n++) { //std::cout << "in = " << in[n].x << ", " << 
in[n].y << std::endl; int sub_x = floorf(GCF_GRID*(in[n].x-floorf(in[n].x))); int sub_y = floorf(GCF_GRID*(in[n].y-floorf(in[n].y))); //std::cout << "sub = " << sub_x << ", " << sub_y << std::endl; int main_x = floor(in[n].x); int main_y = floor(in[n].y); //std::cout << "main = " << main_x << ", " << main_y << std::endl; // #pragma acc parallel loop collapse(2) reduction(+:sum_r,sum_i) vector //#pragma omp parallel for collapse(2) reduction(+:sum_r, sum_i) for (int a=GCF_DIM/2; a>-GCF_DIM/2 ;a--) for (int b=GCF_DIM/2; b>-GCF_DIM/2 ;b--) { PRECISION r2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].x; PRECISION i2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].y; PRECISION r1, i1; if (main_x+a < 0 || main_y+b < 0 || main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) { } else { for (int p=0;p< POLARIZATIONS;p++) { r1 = in_vals[POLARIZATIONS*n+p].x; i1 = in_vals[POLARIZATIONS*n+p].y; #ifdef DEBUG1 out1[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE] += 1; out2[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE] += n; #else out1[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE] += r1*r2-i1*i2; out2[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE] += r1*i2+r2*i1; #endif } } } //std::cout << "val = " << out[n].r << "+ i" << out[n].i << std::endl; } gcf -= GCF_DIM*(GCF_DIM-1)/2-1; } template <class T,class Thalf> int w_comp_main(const void* A, const void* B) { Thalf quota, rema, quotb, remb; rema = modf((*((T*)A)).x, &quota); remb = modf((*((T*)B)).x, &quotb); if (quota > quotb) return 1; if (quota < quotb) return -1; else { rema = modf((*((T*)A)).y, &quota); remb = modf((*((T*)B)).y, &quotb); if (quota > quotb) return 1; if (quota < quotb) return -1; else return 0; } return 0; } template <class T,class Thalf> int w_comp_sub(const void* A, const void* B) { Thalf quota, rema, quotb, remb; rema = modf((*((T*)A)).x, &quota); remb = modf((*((T*)B)).x, &quotb); int sub_xa = (int) (GCF_GRID*rema); int sub_xb = (int) (GCF_GRID*remb); rema = 
modf((*((T*)A)).y, &quota); remb = modf((*((T*)B)).y, &quotb); int suba = (int) (GCF_GRID*rema) + GCF_GRID*sub_xa; int subb = (int) (GCF_GRID*remb) + GCF_GRID*sub_xb; if (suba > subb) return 1; if (suba < subb) return -1; return 0; } template <class T,class Thalf> int w_comp_full(const void* A, const void* B) { int result = w_comp_sub<T,Thalf>(A,B); if (0==result) return w_comp_main<T,Thalf>(A,B); else return result; } #if 0 struct comp_grid { int blockgrid, blocksize; public: comp_grid(int img_dim, int gcf_dim) { blocksize = gcf_dim/2; blockgrid = img_dim/blocksize; } int __cdecl operator () (const void* A, const void* B) const { int gridxa = (*(int2*)A).x/GCF_GRID; int gridxb = (*(int2*)B).x/GCF_GRID; int gridya = (*(int2*)A).y/GCF_GRID; int gridyb = (*(int2*)B).y/GCF_GRID; if (gridya > gridyb) return 1; if (gridya < gridyb) return -1; if (gridxa > gridxb) return 1; if (gridxa < gridxb) return -1; int suba = GCF_GRID*((*(int2*)A).x%GCF_GRID) + (*(int2*)A).y%GCF_GRID; int subb = GCF_GRID*((*(int2*)B).x%GCF_GRID) + (*(int2*)B).y%GCF_GRID; if (suba > subb) return 1; if (suba < subb) return -1; return 0; } }; #else template <class T, class Thalf> int comp_grid (const void* A, const void* B) { int blocksize = GCF_DIM/2; int mainxa = floorf((*(T*)A).x); int mainxb = floorf((*(T*)B).x); int mainya = floorf((*(T*)A).y); int mainyb = floorf((*(T*)B).y); int gridxa = mainxa/blocksize; int gridxb = mainxb/blocksize; int gridya = mainya/blocksize; int gridyb = mainyb/blocksize; if (gridya*(IMG_SIZE+blocksize-1)/blocksize+gridxa > gridyb*(IMG_SIZE+blocksize-1)/blocksize+gridxb) return 1; if (gridya*(IMG_SIZE+blocksize-1)/blocksize+gridxa < gridyb*(IMG_SIZE+blocksize-1)/blocksize+gridxb) return -1; Thalf suba = GCF_GRID*((*(T*)A).x-mainxa) + (*(T*)A).y-mainya; Thalf subb = GCF_GRID*((*(T*)B).x-mainxb) + (*(T*)B).y-mainyb; if (suba > subb) return 1; if (suba < subb) return -1; return 0; } #endif int main(int argc, char** argv) { #ifdef __MANAGED OUTPRECISION2 *out; PRECISION2 
*in, *in_vals, *gcf; hipMallocManaged(&out, sizeof(OUTPRECISION2)*(IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)*POLARIZATIONS); hipMallocManaged(&in, sizeof(PRECISION2)*NPOINTS); hipMallocManaged(&in_vals, sizeof(PRECISION2)*NPOINTS*POLARIZATIONS); hipMallocManaged(&gcf, sizeof(PRECISION2)*64*GCF_DIM*GCF_DIM); #else OUTPRECISION2* out = (OUTPRECISION2*) malloc(sizeof(OUTPRECISION2)*(IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)*POLARIZATIONS); PRECISION2* in = (PRECISION2*) malloc(sizeof(PRECISION2)*NPOINTS); PRECISION2* in_vals = (PRECISION2*) malloc(sizeof(PRECISION2)*NPOINTS*POLARIZATIONS); PRECISION2 *gcf = (PRECISION2*) malloc(64*GCF_DIM*GCF_DIM*sizeof(PRECISION2)); #endif int npts=NPOINTS; // *** Report run parameters *** printf("*** GPU Gridding ***\n"); #ifdef DEBUG1 printf("\n Debug\n\n"); #endif #ifdef __GATHER printf(" Gather strategy\n"); #else #ifdef __MOVING_WINDOW printf(" Moving Window strategy\n"); #else printf(" Simple scatter strategy\n"); #endif #ifdef __NOATOMIC printf(" No atomics\n"); #endif #endif #if PRECISION==double printf(" Double precision\n"); #else printf(" Single precision\n"); #endif printf(" Image size %dx%d\n", IMG_SIZE, IMG_SIZE); printf(" GCF size %dx%d\n", GCF_DIM, GCF_DIM); printf(" %d polarizations\n", POLARIZATIONS); printf(" %d visibilities\n", npts); printf(" Subgrid: 1/%d\n", GCF_GRID); #ifdef __FILE_INPUT printf(" File input\n"); #endif #ifdef __HDF5_INPUT printf(" HDF5 input\n"); #endif #ifdef __COMPUTE_GCF printf(" Computed GCF\n"); #endif printf("\n\n\n"); init_gcf(gcf, GCF_DIM); #ifdef __FILE_INPUT char filename[400]; if (argc>1) { filename = argv[1]; } else { sprintf(filename, "%s", "UVW_in.dat"); } FILE *uvw_f = fopen("UVW_in.dat", "r"); int junka,junkb,junkc; float fjunka, fjunkb, fjunkc; float max_x, min_x, max_y, min_y; max_x = max_y = INT_MIN; min_x = min_y = INT_MAX; for(size_t n=0; n<npts; n++) { fscanf(uvw_f, "%d,%d,%d: %f, %f, %f\n", &junka, &junkb, &junkc, &fjunka, &fjunkb, &fjunkc); in[n].x = 
fjunka*IMG_SIZE/2048.; in[n].y = fjunkb*IMG_SIZE/2048.; min_x = in[n].x < min_x ? in[n].x : min_x; max_x = in[n].x > max_x ? in[n].x : max_x; min_y = in[n].y < min_y ? in[n].y : min_y; max_y = in[n].y > max_y ? in[n].y : max_y; for (int p=0;p<POLARIZATIONS;p++) { in_vals[POLARIZATIONS*n+p].x = ((float)rand())/RAND_MAX; in_vals[POLARIZATIONS*n+p].y = ((float)rand())/RAND_MAX; } } printf("%f -- %f, %f -- %f\n", min_x, max_x, min_y, max_y); fclose(uvw_f); #else #ifdef __HDF5_INPUT //char* filename[]="vis.hdf5"; #if 0 if (argc>1) { sprintf(filename, "%s", argv[1]); } else { sprintf(filename, "%s", "vis.h5"); } #endif H5File* file; if (argc>1) file = new H5File(H5std_string(argv[1]), H5F_ACC_RDONLY); else file = new H5File(H5std_string("vis.h5"), H5F_ACC_RDONLY); try { //For scoping visarray std::vector<struct vis> visarray = HDF5_to_struct(file); file->close(); size_t total_sz = visarray.size(); free(in); in = (PRECISION2*) malloc(sizeof(PRECISION2)*total_sz); free(in_vals); in_vals = (PRECISION2*) malloc(sizeof(PRECISION2)*total_sz*POLARIZATIONS); float inminx = INT_MAX; float inminy = INT_MAX; float inmaxx = -INT_MAX; float inmaxy = -INT_MAX; for (int q=0;q<total_sz;q++) { in[q].x = visarray[q].u; in[q].y = visarray[q].v; for (int p=0;p<POLARIZATIONS;p++) { in_vals[POLARIZATIONS*q+p].x = visarray[q].r; in_vals[POLARIZATIONS*q+p].y = visarray[q].i; } inminx = inminx < in[q].x ? inminx : in[q].x; inminy = inminy < in[q].y ? inminy : in[q].y; inmaxx = inmaxx > in[q].x ? inmaxx : in[q].x; inmaxy = inmaxy > in[q].y ? 
inmaxy : in[q].y; } printf("Image limits: (%f, %f) -- (%f, %f)\n", inminx, inminy, inmaxx, inmaxy); npts = total_sz; printf(" %d visibilities\n", npts); } catch( FileIException error ) { error.printError(); return -1; } // catch failure caused by the DataSet operations catch( DataSetIException error ) { error.printError(); return -1; } // catch failure caused by the DataSpace operations catch( DataSpaceIException error ) { error.printError(); return -1; } // catch failure caused by the DataSpace operations catch( DataTypeIException error ) { error.printError(); } #else srand(1541617); for(size_t n=0; n<npts; n++) { in[n].x = ((float)rand())/RAND_MAX*IMG_SIZE; in[n].y = ((float)rand())/RAND_MAX*IMG_SIZE; for (int p=0;p<POLARIZATIONS;p++) { in_vals[POLARIZATIONS*n+p].x = ((float)rand())/RAND_MAX; in_vals[POLARIZATIONS*n+p].y = ((float)rand())/RAND_MAX; } } #endif //HDF5_INPUT #endif //Zero the data in the offset areas //for (int x=-IMG_SIZE*GCF_DIM-GCF_DIM;x<0;x++) { // out[x].x = 0.0; out[x].y = 0.0; // } for (int x=0;x<IMG_SIZE*GCF_DIM*POLARIZATIONS+GCF_DIM*POLARIZATIONS;x++) { out[x].x=0.0; out[x].y=0.0; out[x+(IMG_SIZE*IMG_SIZE+IMG_SIZE*GCF_DIM+GCF_DIM)*POLARIZATIONS].x = 0.0; out[x+(IMG_SIZE*IMG_SIZE+IMG_SIZE*GCF_DIM+GCF_DIM)*POLARIZATIONS].y = 0.0; } #ifdef __GATHER std::qsort(in, npts, sizeof(PRECISION2), comp_grid<PRECISION2,PRECISION>); #else #ifdef __MOVING_WINDOW std::qsort(in, npts, sizeof(PRECISION2), w_comp_main<PRECISION2,PRECISION>); #else std::qsort(in, npts, sizeof(PRECISION2), w_comp_sub<PRECISION2,PRECISION>); #endif #endif //auto tmp = in[0]; //in[0] = in[204]; //in[204]=tmp; std::cout << "Computing on GPU..." << std::endl; gridGPU(out,in,in_vals,npts,IMG_SIZE,gcf,GCF_DIM); #ifdef __CPU_CHECK std::cout << "Computing on CPU..." 
<< std::endl; OUTPRECISION2 *out_cpu=(OUTPRECISION2*)malloc(sizeof(OUTPRECISION2)*(IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)*POLARIZATIONS); memset(out_cpu, 0, sizeof(OUTPRECISION2)*(IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)*POLARIZATIONS); gridCPU_pz(out_cpu+IMG_SIZE*GCF_DIM+GCF_DIM,in,in_vals,npts,IMG_SIZE,gcf,GCF_DIM); //gridCPU(out+IMG_SIZE*GCF_DIM+GCF_DIM,in,in_vals,npts,IMG_SIZE,gcf,GCF_DIM); #endif #ifdef __CPU_CHECK std::cout << "Checking results against CPU:" << std::endl; std::cout.precision(11); for (size_t yy = 0; yy < IMG_SIZE; yy++) { for (size_t xx = 0; xx < IMG_SIZE; xx++) { int n = GCF_DIM+IMG_SIZE*GCF_DIM+yy*IMG_SIZE+xx; for (int p = 0; p < IMG_SIZE*IMG_SIZE*POLARIZATIONS; p+=IMG_SIZE*IMG_SIZE) { if (fabs(out[n+p].x-out_cpu[n+p].x) > 0.000001 || fabs(out[n+p].y-out_cpu[n+p].y) > 0.000001 ) std::cout << xx << ", " << yy << "[" << p/IMG_SIZE/IMG_SIZE << "] : " << "(" << n+p-(GCF_DIM+IMG_SIZE*GCF_DIM) << ") " << out[n+p].x << ", " << out[n+p].y << " vs. " << out_cpu[n+p].x << ", " << out_cpu[n+p].y << std::endl; } } } //std::cout << "free out_cpu" << std::endl; //free(out_cpu);out_cpu=NULL; #endif #ifdef __MANAGED hipFree(out);out=NULL; hipFree(in);in=NULL; hipFree(in_vals);in_vals=NULL; hipFree(gcf);gcf=NULL; #else free(out);out=NULL; free(in);in=NULL; free(in_vals);in_vals=NULL; free(gcf);gcf=NULL; #endif }
0e8dceb2cd3a44ce10d72e50882ad2ab670af495.cu
/********* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of NVIDIA CORPORATION nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *********/ #include <iostream> #include "math.h" #include "stdlib.h" #include "grid_gpu.cuh" #include "Defines.h" #include "cuda.h" #ifdef __HDF5_INPUT #include <vector> #include "H5Cpp.h" #include "vis.h" using namespace H5; std::vector<struct vis> HDF5_to_struct(H5File* file); #endif //With managed memory, grid.cpp must be compiled as CUDA //in which case float2 and double2 are predefined. 
//typedef struct {float x,y;} float2; //typedef struct {double x,y;} double2; #define single 77 #if PRECISION==single #define PRECISION float #endif #if OUTPRECISION==single #define OUTPRECISION float #endif #ifndef PRECISION #define PRECISION double #endif #define PASTER(x) x ## 2 #define EVALUATOR(x) PASTER(x) #define PRECISION2 EVALUATOR(PRECISION) #ifndef OUTPRECISION #define OUTPRECISION PRECISION #endif #define OUTPRECISION2 EVALUATOR(OUTPRECISION) void init_gcf(PRECISION2 *gcf, size_t size) { for (size_t sub_x=0; sub_x<GCF_GRID; sub_x++ ) for (size_t sub_y=0; sub_y<GCF_GRID; sub_y++ ) for(size_t x=0; x<size; x++) for(size_t y=0; y<size; y++) { //Some nonsense GCF PRECISION tmp = sin(6.28*x/size/GCF_GRID)*exp(-(1.0*x*x+1.0*y*y*sub_y)/size/size/2); gcf[size*size*(sub_x+sub_y*GCF_GRID)+x+y*size].x = tmp*sin(1.0*x*sub_x/(y+1))+0.4; gcf[size*size*(sub_x+sub_y*GCF_GRID)+x+y*size].y = tmp*cos(1.0*x*sub_x/(y+1))-0.2; //std::cout << tmp << gcf[x+y*size].x << gcf[x+y*size].y << std::endl; } } void gridCPU(PRECISION2* out, PRECISION2 *in, PRECISION2 *in_vals, size_t npts, size_t img_dim, PRECISION2 *gcf, size_t gcf_dim) { //degrid on the CPU // out (out) - the output image // in (in) - the input locations // in_vals (in) - input values // npts (in) - number of locations // img_dim (in) - dimension of the image // gcf (in) - the gridding convolution function // gcf_dim (in) - dimension of the GCF //Zero the output for (size_t n=0;n<IMG_SIZE*IMG_SIZE; n++) out[n].x = out[n].y = 0.0; //offset gcf to point to the middle for cleaner code later gcf += GCF_DIM*(GCF_DIM-1)/2-1; double* out1 = (double*)out; double* out2 = (double*)(out+POLARIZATIONS*img_dim*img_dim/2); //#pragma acc parallel loop copyout(out[0:NPOINTS]) copyin(in[0:NPOINTS],gcf[0:GCF_GRID*GCF_GRID*GCF_DIM*GCF_DIM],img[IMG_SIZE*IMG_SIZE]) gang //#pragma omp parallel for for(size_t n=0; n<npts; n++) { //std::cout << "in = " << in[n].x << ", " << in[n].y << std::endl; int sub_x = 
floorf(GCF_GRID*(in[n].x-floorf(in[n].x))); int sub_y = floorf(GCF_GRID*(in[n].y-floorf(in[n].y))); //std::cout << "sub = " << sub_x << ", " << sub_y << std::endl; int main_x = floor(in[n].x); int main_y = floor(in[n].y); //std::cout << "main = " << main_x << ", " << main_y << std::endl; // #pragma acc parallel loop collapse(2) reduction(+:sum_r,sum_i) vector //#pragma omp parallel for collapse(2) reduction(+:sum_r, sum_i) for (int a=GCF_DIM/2; a>-GCF_DIM/2 ;a--) for (int b=GCF_DIM/2; b>-GCF_DIM/2 ;b--) { PRECISION r2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].x; PRECISION i2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].y; PRECISION r1, i1; r1 = in_vals[n].x; i1 = in_vals[n].y; if (main_x+a < 0 || main_y+b < 0 || main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) { } else { #ifdef DEBUG1 out1[main_x+a+IMG_SIZE*(main_y+b)] += 1; out2[main_x+a+IMG_SIZE*(main_y+b)] += n; #else out1[main_x+a+IMG_SIZE*(main_y+b)] += r1*r2-i1*i2; out2[main_x+a+IMG_SIZE*(main_y+b)] += r1*i2+r2*i1; #endif } } //std::cout << "val = " << out[n].r << "+ i" << out[n].i << std::endl; } gcf -= GCF_DIM*(GCF_DIM-1)/2-1; } void gridCPU_pz(OUTPRECISION2* out, PRECISION2 *in, PRECISION2 *in_vals, size_t npts, size_t img_dim, PRECISION2 *gcf, size_t gcf_dim) { //degrid on the CPU // out (out) - the output image // in (in) - the input locations // in_vals (in) - input values // npts (in) - number of locations // img_dim (in) - dimension of the image // gcf (in) - the gridding convolution function // gcf_dim (in) - dimension of the GCF //Zero the output //offset gcf to point to the middle for cleaner code later gcf += GCF_DIM*(GCF_DIM-1)/2-1; double* out1 = (double*)out; double* out2 = ((double*)out)+POLARIZATIONS*img_dim*img_dim; //#pragma acc parallel loop copyout(out[0:NPOINTS]) copyin(in[0:NPOINTS],gcf[0:GCF_GRID*GCF_GRID*GCF_DIM*GCF_DIM],img[IMG_SIZE*IMG_SIZE]) gang //#pragma omp parallel for for(size_t n=0; n<npts; n++) { //std::cout << "in = " << in[n].x << ", " << 
in[n].y << std::endl; int sub_x = floorf(GCF_GRID*(in[n].x-floorf(in[n].x))); int sub_y = floorf(GCF_GRID*(in[n].y-floorf(in[n].y))); //std::cout << "sub = " << sub_x << ", " << sub_y << std::endl; int main_x = floor(in[n].x); int main_y = floor(in[n].y); //std::cout << "main = " << main_x << ", " << main_y << std::endl; // #pragma acc parallel loop collapse(2) reduction(+:sum_r,sum_i) vector //#pragma omp parallel for collapse(2) reduction(+:sum_r, sum_i) for (int a=GCF_DIM/2; a>-GCF_DIM/2 ;a--) for (int b=GCF_DIM/2; b>-GCF_DIM/2 ;b--) { PRECISION r2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].x; PRECISION i2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].y; PRECISION r1, i1; if (main_x+a < 0 || main_y+b < 0 || main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) { } else { for (int p=0;p< POLARIZATIONS;p++) { r1 = in_vals[POLARIZATIONS*n+p].x; i1 = in_vals[POLARIZATIONS*n+p].y; #ifdef DEBUG1 out1[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE] += 1; out2[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE] += n; #else out1[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE] += r1*r2-i1*i2; out2[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE] += r1*i2+r2*i1; #endif } } } //std::cout << "val = " << out[n].r << "+ i" << out[n].i << std::endl; } gcf -= GCF_DIM*(GCF_DIM-1)/2-1; } template <class T,class Thalf> int w_comp_main(const void* A, const void* B) { Thalf quota, rema, quotb, remb; rema = modf((*((T*)A)).x, &quota); remb = modf((*((T*)B)).x, &quotb); if (quota > quotb) return 1; if (quota < quotb) return -1; else { rema = modf((*((T*)A)).y, &quota); remb = modf((*((T*)B)).y, &quotb); if (quota > quotb) return 1; if (quota < quotb) return -1; else return 0; } return 0; } template <class T,class Thalf> int w_comp_sub(const void* A, const void* B) { Thalf quota, rema, quotb, remb; rema = modf((*((T*)A)).x, &quota); remb = modf((*((T*)B)).x, &quotb); int sub_xa = (int) (GCF_GRID*rema); int sub_xb = (int) (GCF_GRID*remb); rema = 
modf((*((T*)A)).y, &quota); remb = modf((*((T*)B)).y, &quotb); int suba = (int) (GCF_GRID*rema) + GCF_GRID*sub_xa; int subb = (int) (GCF_GRID*remb) + GCF_GRID*sub_xb; if (suba > subb) return 1; if (suba < subb) return -1; return 0; } template <class T,class Thalf> int w_comp_full(const void* A, const void* B) { int result = w_comp_sub<T,Thalf>(A,B); if (0==result) return w_comp_main<T,Thalf>(A,B); else return result; } #if 0 struct comp_grid { int blockgrid, blocksize; public: comp_grid(int img_dim, int gcf_dim) { blocksize = gcf_dim/2; blockgrid = img_dim/blocksize; } int __cdecl operator () (const void* A, const void* B) const { int gridxa = (*(int2*)A).x/GCF_GRID; int gridxb = (*(int2*)B).x/GCF_GRID; int gridya = (*(int2*)A).y/GCF_GRID; int gridyb = (*(int2*)B).y/GCF_GRID; if (gridya > gridyb) return 1; if (gridya < gridyb) return -1; if (gridxa > gridxb) return 1; if (gridxa < gridxb) return -1; int suba = GCF_GRID*((*(int2*)A).x%GCF_GRID) + (*(int2*)A).y%GCF_GRID; int subb = GCF_GRID*((*(int2*)B).x%GCF_GRID) + (*(int2*)B).y%GCF_GRID; if (suba > subb) return 1; if (suba < subb) return -1; return 0; } }; #else template <class T, class Thalf> int comp_grid (const void* A, const void* B) { int blocksize = GCF_DIM/2; int mainxa = floorf((*(T*)A).x); int mainxb = floorf((*(T*)B).x); int mainya = floorf((*(T*)A).y); int mainyb = floorf((*(T*)B).y); int gridxa = mainxa/blocksize; int gridxb = mainxb/blocksize; int gridya = mainya/blocksize; int gridyb = mainyb/blocksize; if (gridya*(IMG_SIZE+blocksize-1)/blocksize+gridxa > gridyb*(IMG_SIZE+blocksize-1)/blocksize+gridxb) return 1; if (gridya*(IMG_SIZE+blocksize-1)/blocksize+gridxa < gridyb*(IMG_SIZE+blocksize-1)/blocksize+gridxb) return -1; Thalf suba = GCF_GRID*((*(T*)A).x-mainxa) + (*(T*)A).y-mainya; Thalf subb = GCF_GRID*((*(T*)B).x-mainxb) + (*(T*)B).y-mainyb; if (suba > subb) return 1; if (suba < subb) return -1; return 0; } #endif int main(int argc, char** argv) { #ifdef __MANAGED OUTPRECISION2 *out; PRECISION2 
*in, *in_vals, *gcf; cudaMallocManaged(&out, sizeof(OUTPRECISION2)*(IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)*POLARIZATIONS); cudaMallocManaged(&in, sizeof(PRECISION2)*NPOINTS); cudaMallocManaged(&in_vals, sizeof(PRECISION2)*NPOINTS*POLARIZATIONS); cudaMallocManaged(&gcf, sizeof(PRECISION2)*64*GCF_DIM*GCF_DIM); #else OUTPRECISION2* out = (OUTPRECISION2*) malloc(sizeof(OUTPRECISION2)*(IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)*POLARIZATIONS); PRECISION2* in = (PRECISION2*) malloc(sizeof(PRECISION2)*NPOINTS); PRECISION2* in_vals = (PRECISION2*) malloc(sizeof(PRECISION2)*NPOINTS*POLARIZATIONS); PRECISION2 *gcf = (PRECISION2*) malloc(64*GCF_DIM*GCF_DIM*sizeof(PRECISION2)); #endif int npts=NPOINTS; // *** Report run parameters *** printf("*** GPU Gridding ***\n"); #ifdef DEBUG1 printf("\n Debug\n\n"); #endif #ifdef __GATHER printf(" Gather strategy\n"); #else #ifdef __MOVING_WINDOW printf(" Moving Window strategy\n"); #else printf(" Simple scatter strategy\n"); #endif #ifdef __NOATOMIC printf(" No atomics\n"); #endif #endif #if PRECISION==double printf(" Double precision\n"); #else printf(" Single precision\n"); #endif printf(" Image size %dx%d\n", IMG_SIZE, IMG_SIZE); printf(" GCF size %dx%d\n", GCF_DIM, GCF_DIM); printf(" %d polarizations\n", POLARIZATIONS); printf(" %d visibilities\n", npts); printf(" Subgrid: 1/%d\n", GCF_GRID); #ifdef __FILE_INPUT printf(" File input\n"); #endif #ifdef __HDF5_INPUT printf(" HDF5 input\n"); #endif #ifdef __COMPUTE_GCF printf(" Computed GCF\n"); #endif printf("\n\n\n"); init_gcf(gcf, GCF_DIM); #ifdef __FILE_INPUT char filename[400]; if (argc>1) { filename = argv[1]; } else { sprintf(filename, "%s", "UVW_in.dat"); } FILE *uvw_f = fopen("UVW_in.dat", "r"); int junka,junkb,junkc; float fjunka, fjunkb, fjunkc; float max_x, min_x, max_y, min_y; max_x = max_y = INT_MIN; min_x = min_y = INT_MAX; for(size_t n=0; n<npts; n++) { fscanf(uvw_f, "%d,%d,%d: %f, %f, %f\n", &junka, &junkb, &junkc, &fjunka, &fjunkb, &fjunkc); in[n].x = 
fjunka*IMG_SIZE/2048.; in[n].y = fjunkb*IMG_SIZE/2048.; min_x = in[n].x < min_x ? in[n].x : min_x; max_x = in[n].x > max_x ? in[n].x : max_x; min_y = in[n].y < min_y ? in[n].y : min_y; max_y = in[n].y > max_y ? in[n].y : max_y; for (int p=0;p<POLARIZATIONS;p++) { in_vals[POLARIZATIONS*n+p].x = ((float)rand())/RAND_MAX; in_vals[POLARIZATIONS*n+p].y = ((float)rand())/RAND_MAX; } } printf("%f -- %f, %f -- %f\n", min_x, max_x, min_y, max_y); fclose(uvw_f); #else #ifdef __HDF5_INPUT //char* filename[]="vis.hdf5"; #if 0 if (argc>1) { sprintf(filename, "%s", argv[1]); } else { sprintf(filename, "%s", "vis.h5"); } #endif H5File* file; if (argc>1) file = new H5File(H5std_string(argv[1]), H5F_ACC_RDONLY); else file = new H5File(H5std_string("vis.h5"), H5F_ACC_RDONLY); try { //For scoping visarray std::vector<struct vis> visarray = HDF5_to_struct(file); file->close(); size_t total_sz = visarray.size(); free(in); in = (PRECISION2*) malloc(sizeof(PRECISION2)*total_sz); free(in_vals); in_vals = (PRECISION2*) malloc(sizeof(PRECISION2)*total_sz*POLARIZATIONS); float inminx = INT_MAX; float inminy = INT_MAX; float inmaxx = -INT_MAX; float inmaxy = -INT_MAX; for (int q=0;q<total_sz;q++) { in[q].x = visarray[q].u; in[q].y = visarray[q].v; for (int p=0;p<POLARIZATIONS;p++) { in_vals[POLARIZATIONS*q+p].x = visarray[q].r; in_vals[POLARIZATIONS*q+p].y = visarray[q].i; } inminx = inminx < in[q].x ? inminx : in[q].x; inminy = inminy < in[q].y ? inminy : in[q].y; inmaxx = inmaxx > in[q].x ? inmaxx : in[q].x; inmaxy = inmaxy > in[q].y ? 
inmaxy : in[q].y; } printf("Image limits: (%f, %f) -- (%f, %f)\n", inminx, inminy, inmaxx, inmaxy); npts = total_sz; printf(" %d visibilities\n", npts); } catch( FileIException error ) { error.printError(); return -1; } // catch failure caused by the DataSet operations catch( DataSetIException error ) { error.printError(); return -1; } // catch failure caused by the DataSpace operations catch( DataSpaceIException error ) { error.printError(); return -1; } // catch failure caused by the DataSpace operations catch( DataTypeIException error ) { error.printError(); } #else srand(1541617); for(size_t n=0; n<npts; n++) { in[n].x = ((float)rand())/RAND_MAX*IMG_SIZE; in[n].y = ((float)rand())/RAND_MAX*IMG_SIZE; for (int p=0;p<POLARIZATIONS;p++) { in_vals[POLARIZATIONS*n+p].x = ((float)rand())/RAND_MAX; in_vals[POLARIZATIONS*n+p].y = ((float)rand())/RAND_MAX; } } #endif //HDF5_INPUT #endif //Zero the data in the offset areas //for (int x=-IMG_SIZE*GCF_DIM-GCF_DIM;x<0;x++) { // out[x].x = 0.0; out[x].y = 0.0; // } for (int x=0;x<IMG_SIZE*GCF_DIM*POLARIZATIONS+GCF_DIM*POLARIZATIONS;x++) { out[x].x=0.0; out[x].y=0.0; out[x+(IMG_SIZE*IMG_SIZE+IMG_SIZE*GCF_DIM+GCF_DIM)*POLARIZATIONS].x = 0.0; out[x+(IMG_SIZE*IMG_SIZE+IMG_SIZE*GCF_DIM+GCF_DIM)*POLARIZATIONS].y = 0.0; } #ifdef __GATHER std::qsort(in, npts, sizeof(PRECISION2), comp_grid<PRECISION2,PRECISION>); #else #ifdef __MOVING_WINDOW std::qsort(in, npts, sizeof(PRECISION2), w_comp_main<PRECISION2,PRECISION>); #else std::qsort(in, npts, sizeof(PRECISION2), w_comp_sub<PRECISION2,PRECISION>); #endif #endif //auto tmp = in[0]; //in[0] = in[204]; //in[204]=tmp; std::cout << "Computing on GPU..." << std::endl; gridGPU(out,in,in_vals,npts,IMG_SIZE,gcf,GCF_DIM); #ifdef __CPU_CHECK std::cout << "Computing on CPU..." 
<< std::endl; OUTPRECISION2 *out_cpu=(OUTPRECISION2*)malloc(sizeof(OUTPRECISION2)*(IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)*POLARIZATIONS); memset(out_cpu, 0, sizeof(OUTPRECISION2)*(IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)*POLARIZATIONS); gridCPU_pz(out_cpu+IMG_SIZE*GCF_DIM+GCF_DIM,in,in_vals,npts,IMG_SIZE,gcf,GCF_DIM); //gridCPU(out+IMG_SIZE*GCF_DIM+GCF_DIM,in,in_vals,npts,IMG_SIZE,gcf,GCF_DIM); #endif #ifdef __CPU_CHECK std::cout << "Checking results against CPU:" << std::endl; std::cout.precision(11); for (size_t yy = 0; yy < IMG_SIZE; yy++) { for (size_t xx = 0; xx < IMG_SIZE; xx++) { int n = GCF_DIM+IMG_SIZE*GCF_DIM+yy*IMG_SIZE+xx; for (int p = 0; p < IMG_SIZE*IMG_SIZE*POLARIZATIONS; p+=IMG_SIZE*IMG_SIZE) { if (fabs(out[n+p].x-out_cpu[n+p].x) > 0.000001 || fabs(out[n+p].y-out_cpu[n+p].y) > 0.000001 ) std::cout << xx << ", " << yy << "[" << p/IMG_SIZE/IMG_SIZE << "] : " << "(" << n+p-(GCF_DIM+IMG_SIZE*GCF_DIM) << ") " << out[n+p].x << ", " << out[n+p].y << " vs. " << out_cpu[n+p].x << ", " << out_cpu[n+p].y << std::endl; } } } //std::cout << "free out_cpu" << std::endl; //free(out_cpu);out_cpu=NULL; #endif #ifdef __MANAGED cudaFree(out);out=NULL; cudaFree(in);in=NULL; cudaFree(in_vals);in_vals=NULL; cudaFree(gcf);gcf=NULL; #else free(out);out=NULL; free(in);in=NULL; free(in_vals);in_vals=NULL; free(gcf);gcf=NULL; #endif }
881a718fb18a4c40815a6ded6d43fd8a67c5c45c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "negative_log_likelihood_layer_tester_cuda.h" #include <hip/hip_runtime.h> #include "../negative_log_likelihood_layer.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; __global__ void negative_log_likelihood_kernel( float * __restrict output, const float * __restrict predicted, const float * __restrict actual, const float * __restrict scale_mask, int input_feature_map_count, int elem_count_per_feature_map, float scale, int entry_count) { int feature_map_id = threadIdx.x; int neuron_id = blockIdx.x; int entry_id = blockIdx.y; int threadblock_size = blockDim.x; float err = 0.0F; int output_offset = entry_id * elem_count_per_feature_map + neuron_id; float mask = 1.0F; if (scale_mask) mask = scale_mask[output_offset]; int thread_id = threadIdx.x; if (mask != 0.0F) { int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; while (feature_map_id < input_feature_map_count) { float actual_val = actual[input_offset]; float predicted_val = predicted[input_offset]; err -= (actual_val > 0.0F) ? 
actual_val * __logf(max(predicted_val, 1.0e-20F)) : 0.0F; feature_map_id += threadblock_size; input_offset += threadblock_size * elem_count_per_feature_map; } int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) err += __shfl_down(err, tx); int warp_count = threadblock_size >> 5; if (warp_count > 1) { if (lane_id == 0) arr_sh[thread_id >> 5] = err; __syncthreads(); if (thread_id < 32) { err = 0.0F; if (thread_id < warp_count) err = arr_sh[thread_id]; #pragma unroll for(int tx = 4; tx > 0; tx >>= 1) err += __shfl_down(err, tx); } } } if (thread_id == 0) output[output_offset] = err * (mask * scale); } void negative_log_likelihood_layer_tester_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count); const float * scale_mask = 0; if (input_buffers.size() > 2) scale_mask = *input_buffers[2]; int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float); hipLaunchKernelGGL(( negative_log_likelihood_kernel), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id, *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], scale, entry_count); } void negative_log_likelihood_layer_tester_cuda::tester_configured() { 
std::shared_ptr<const negative_log_likelihood_layer> layer_derived = std::dynamic_pointer_cast<const negative_log_likelihood_layer>(layer_schema); scale = layer_derived->scale; } int negative_log_likelihood_layer_tester_cuda::get_threadblock_size(int input_feature_map_count) { int threadblock_size; if (input_feature_map_count < 256) { threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_feature_map_count + 256 - 1) / 256; threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
881a718fb18a4c40815a6ded6d43fd8a67c5c45c.cu
/* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "negative_log_likelihood_layer_tester_cuda.h" #include <cuda_runtime.h> #include "../negative_log_likelihood_layer.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; __global__ void negative_log_likelihood_kernel( float * __restrict output, const float * __restrict predicted, const float * __restrict actual, const float * __restrict scale_mask, int input_feature_map_count, int elem_count_per_feature_map, float scale, int entry_count) { int feature_map_id = threadIdx.x; int neuron_id = blockIdx.x; int entry_id = blockIdx.y; int threadblock_size = blockDim.x; float err = 0.0F; int output_offset = entry_id * elem_count_per_feature_map + neuron_id; float mask = 1.0F; if (scale_mask) mask = scale_mask[output_offset]; int thread_id = threadIdx.x; if (mask != 0.0F) { int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; while (feature_map_id < input_feature_map_count) { float actual_val = actual[input_offset]; float predicted_val = predicted[input_offset]; err -= (actual_val > 0.0F) ? 
actual_val * __logf(max(predicted_val, 1.0e-20F)) : 0.0F; feature_map_id += threadblock_size; input_offset += threadblock_size * elem_count_per_feature_map; } int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) err += __shfl_down(err, tx); int warp_count = threadblock_size >> 5; if (warp_count > 1) { if (lane_id == 0) arr_sh[thread_id >> 5] = err; __syncthreads(); if (thread_id < 32) { err = 0.0F; if (thread_id < warp_count) err = arr_sh[thread_id]; #pragma unroll for(int tx = 4; tx > 0; tx >>= 1) err += __shfl_down(err, tx); } } } if (thread_id == 0) output[output_offset] = err * (mask * scale); } void negative_log_likelihood_layer_tester_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count); const float * scale_mask = 0; if (input_buffers.size() > 2) scale_mask = *input_buffers[2]; int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float); negative_log_likelihood_kernel<<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>( *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], scale, entry_count); } void negative_log_likelihood_layer_tester_cuda::tester_configured() { std::shared_ptr<const 
negative_log_likelihood_layer> layer_derived = std::dynamic_pointer_cast<const negative_log_likelihood_layer>(layer_schema); scale = layer_derived->scale; } int negative_log_likelihood_layer_tester_cuda::get_threadblock_size(int input_feature_map_count) { int threadblock_size; if (input_feature_map_count < 256) { threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_feature_map_count + 256 - 1) / 256; threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
62a14c9d10cac659085de03943c2efb045cf1d2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "shared.h" #include "constants.h" void cuda_last_error_check (const char *message); // Add rows kernel & related operations __global__ void evolve_gpu_kernel(float* mat, int n, int m, int iters, int* iteration_counters); __global__ void copy_matrix(float* mat, float* out, int n, int m); __global__ void row_avg(float* rowavg, float* mat, int n, int m); void evolve_gpu(float* rowsum, float* mat1d, int n, int m, int iters, struct DeviceStats* stats); void set_duration(Timer* timer, hipEvent_t* start, hipEvent_t* stop); extern struct Options options; // Global config var void perform_gpu_evolution(float* out_mat1d, float* mat1d, struct DeviceStats* stats) { int n = options.rows; int m = options.cols; int iters = options.iterations; evolve_gpu(out_mat1d, mat1d, n, m, iters, stats); //print_compute_results((char*) "GPU Results:", rowsum, n, m); } void evolve_gpu(float* out_mat1d, float* mat1d, int n, int m, int iters, struct DeviceStats* stats) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Compute execution GPU config int elems_per_thread = (int) ceil((float) m / (float) BLOCK_SIZE); printf("Block size: %d*%d = %d, x_blocks per grid: %d, y_blocks per grid: %d\n", BLOCK_SIZE, 1, BLOCK_SIZE, n, elems_per_thread); // Host: alloc float* rowavg = (float*) malloc(n*sizeof(float)); int iteration_counters[n*elems_per_thread]; for (int i = 0; i < n*elems_per_thread; i++) { iteration_counters[i] = 0; } // Device: alloc float* mat1d_GPU; float* rowavg_GPU; int* iteration_counters_GPU; hipEventRecord(start); hipMalloc((void**) &mat1d_GPU, n*m*sizeof(float)); hipMalloc((void**) &rowavg_GPU, n*sizeof(float)); hipMalloc((void**) &iteration_counters_GPU, n*elems_per_thread*sizeof(int)); hipEventRecord(stop); set_duration(&(stats->allocation), &start, &stop); // Host->Device copy hipEventRecord(start); hipMemcpy(mat1d_GPU, mat1d, 
n*m*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(rowavg_GPU, rowavg, n*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(iteration_counters_GPU, iteration_counters, n*elems_per_thread*sizeof(int), hipMemcpyHostToDevice); hipEventRecord(stop); set_duration(&(stats->to_gpu_transfer), &start, &stop); // Device: execution + timing dim3 dimBlock(BLOCK_SIZE, 1); dim3 dimGrid(n, elems_per_thread); // TODO: check n != m hipEventRecord(start); hipLaunchKernelGGL(( evolve_gpu_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, mat1d_GPU, n, m, iters, iteration_counters_GPU); hipEventRecord(stop); set_duration(&(stats->gpu_compute), &start, &stop); cuda_last_error_check("evolve_gpu"); // Print row average if (options.show_average != 0) { dim3 dimBlock(1, 1); dim3 dimGrid(n, 1); hipEventRecord(start); hipLaunchKernelGGL(( row_avg), dim3(dimGrid), dim3(dimBlock), 0, 0, rowavg_GPU, mat1d_GPU, n, m); hipEventRecord(stop); set_duration(&(stats->row_avg), &start, &stop); cuda_last_error_check("row_average_gpu"); hipMemcpy(rowavg, rowavg_GPU, n*sizeof(float), hipMemcpyDeviceToHost); print_row_avg(rowavg, n, 0); } // Device->Host copy hipEventRecord(start); hipMemcpy(out_mat1d, mat1d_GPU, n*m*sizeof(float), hipMemcpyDeviceToHost); hipEventRecord(stop); set_duration(&(stats->to_cpu_transfer), &start, &stop); hipFree(mat1d_GPU); } // Kernels __global__ void evolve_gpu_kernel(float* mat, int n, int m, int iters, int* iteration_counters) { int dx = threadIdx.x; // Column within a tile int row_no = blockIdx.x; // Row number of matrix int i = blockIdx.y; // Starting column of tile int tiles_per_row = (int) ceil((float) m / (float) BLOCK_SIZE); int cols = (i != tiles_per_row) ? 
m : m % BLOCK_SIZE; int iter_counter_index = row_no*tiles_per_row + blockIdx.y; __shared__ float row[BLOCK_SIZE]; // One row per block __shared__ float out_row[BLOCK_SIZE]; // One row per thread // Load row: global -> shared & barrier sync if (dx < cols) { row[dx] = mat[row_no*m + i*BLOCK_SIZE + dx]; } __syncthreads(); // Left-most tile's columns 0 and 1 if (i == 0 && dx >= 0 && dx <= 1) { out_row[dx] = row[dx]; } // Evolve iterations if (dx > 1) { for (int iter = 0; iter < iters; iter++) { // Propagate row if (dx < cols) { out_row[dx] = ((1.9*row[dx-2]) + (1.5*row[dx-1]) + row[dx] + (0.5*row[(dx+1)%BLOCK_SIZE]) + (0.1*row[(dx+2)%BLOCK_SIZE])) / (float) 5; } __syncthreads(); // Copy over row if (dx < cols) row[dx] = out_row[dx]; if (tiles_per_row > 1 && dx == 0) { // Only barrier sync inter-block when there is more than 1 tile per row atomicAdd(&(iteration_counters[iter_counter_index]), 1); // Atomic incremenet iteration counter if (blockIdx.y == 0) { // Leftmost tile while (iteration_counters[iter_counter_index+1] < iter) { // Block until right neighbour in sync } } else if (blockIdx.y == tiles_per_row-1) { // Right most tile while (iteration_counters[iter_counter_index-1] < iter) { // Block until left neighbour in sync } } else { while (iteration_counters[iter_counter_index-1] < iter && iteration_counters[iter_counter_index+1] < iter) { // Block until left and right neighbours are in sync } } } __syncthreads(); } } // Store row: shared -> global if (dx < cols) mat[row_no*m + i*BLOCK_SIZE + dx] = row[dx]; } __global__ void row_avg(float* rowavg, float* mat1d, int n, int m) { int x = blockIdx.x; int y = threadIdx.y; if (y == 0 && x < n && x >= 0) { double sum = 0; for (int i = 0; i < m; i++) { sum += mat1d[x*m + i]; } double avg = sum / (double) m; rowavg[x] = (float) avg; } } // GPU specific util fns: for cuda error check and duration calc from cuda events void cuda_last_error_check (const char *message) { hipError_t err = hipGetLastError(); if(hipSuccess != err) 
{ printf("[CUDA] [ERROR] %s: %s\n", message, hipGetErrorString(err)); exit(EXIT_FAILURE); } } void set_duration(Timer* timer, hipEvent_t* start, hipEvent_t* stop) { float milliseconds = 0; hipEventSynchronize(*stop); hipEventElapsedTime(&milliseconds, *start, *stop); timer->duration_ms = milliseconds; }
62a14c9d10cac659085de03943c2efb045cf1d2d.cu
#include <stdio.h> #include "shared.h" #include "constants.h" void cuda_last_error_check (const char *message); // Add rows kernel & related operations __global__ void evolve_gpu_kernel(float* mat, int n, int m, int iters, int* iteration_counters); __global__ void copy_matrix(float* mat, float* out, int n, int m); __global__ void row_avg(float* rowavg, float* mat, int n, int m); void evolve_gpu(float* rowsum, float* mat1d, int n, int m, int iters, struct DeviceStats* stats); void set_duration(Timer* timer, cudaEvent_t* start, cudaEvent_t* stop); extern struct Options options; // Global config var void perform_gpu_evolution(float* out_mat1d, float* mat1d, struct DeviceStats* stats) { int n = options.rows; int m = options.cols; int iters = options.iterations; evolve_gpu(out_mat1d, mat1d, n, m, iters, stats); //print_compute_results((char*) "GPU Results:", rowsum, n, m); } void evolve_gpu(float* out_mat1d, float* mat1d, int n, int m, int iters, struct DeviceStats* stats) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Compute execution GPU config int elems_per_thread = (int) ceil((float) m / (float) BLOCK_SIZE); printf("Block size: %d*%d = %d, x_blocks per grid: %d, y_blocks per grid: %d\n", BLOCK_SIZE, 1, BLOCK_SIZE, n, elems_per_thread); // Host: alloc float* rowavg = (float*) malloc(n*sizeof(float)); int iteration_counters[n*elems_per_thread]; for (int i = 0; i < n*elems_per_thread; i++) { iteration_counters[i] = 0; } // Device: alloc float* mat1d_GPU; float* rowavg_GPU; int* iteration_counters_GPU; cudaEventRecord(start); cudaMalloc((void**) &mat1d_GPU, n*m*sizeof(float)); cudaMalloc((void**) &rowavg_GPU, n*sizeof(float)); cudaMalloc((void**) &iteration_counters_GPU, n*elems_per_thread*sizeof(int)); cudaEventRecord(stop); set_duration(&(stats->allocation), &start, &stop); // Host->Device copy cudaEventRecord(start); cudaMemcpy(mat1d_GPU, mat1d, n*m*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(rowavg_GPU, rowavg, 
n*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(iteration_counters_GPU, iteration_counters, n*elems_per_thread*sizeof(int), cudaMemcpyHostToDevice); cudaEventRecord(stop); set_duration(&(stats->to_gpu_transfer), &start, &stop); // Device: execution + timing dim3 dimBlock(BLOCK_SIZE, 1); dim3 dimGrid(n, elems_per_thread); // TODO: check n != m cudaEventRecord(start); evolve_gpu_kernel<<<dimGrid, dimBlock>>>(mat1d_GPU, n, m, iters, iteration_counters_GPU); cudaEventRecord(stop); set_duration(&(stats->gpu_compute), &start, &stop); cuda_last_error_check("evolve_gpu"); // Print row average if (options.show_average != 0) { dim3 dimBlock(1, 1); dim3 dimGrid(n, 1); cudaEventRecord(start); row_avg<<<dimGrid, dimBlock>>>(rowavg_GPU, mat1d_GPU, n, m); cudaEventRecord(stop); set_duration(&(stats->row_avg), &start, &stop); cuda_last_error_check("row_average_gpu"); cudaMemcpy(rowavg, rowavg_GPU, n*sizeof(float), cudaMemcpyDeviceToHost); print_row_avg(rowavg, n, 0); } // Device->Host copy cudaEventRecord(start); cudaMemcpy(out_mat1d, mat1d_GPU, n*m*sizeof(float), cudaMemcpyDeviceToHost); cudaEventRecord(stop); set_duration(&(stats->to_cpu_transfer), &start, &stop); cudaFree(mat1d_GPU); } // Kernels __global__ void evolve_gpu_kernel(float* mat, int n, int m, int iters, int* iteration_counters) { int dx = threadIdx.x; // Column within a tile int row_no = blockIdx.x; // Row number of matrix int i = blockIdx.y; // Starting column of tile int tiles_per_row = (int) ceil((float) m / (float) BLOCK_SIZE); int cols = (i != tiles_per_row) ? 
m : m % BLOCK_SIZE; int iter_counter_index = row_no*tiles_per_row + blockIdx.y; __shared__ float row[BLOCK_SIZE]; // One row per block __shared__ float out_row[BLOCK_SIZE]; // One row per thread // Load row: global -> shared & barrier sync if (dx < cols) { row[dx] = mat[row_no*m + i*BLOCK_SIZE + dx]; } __syncthreads(); // Left-most tile's columns 0 and 1 if (i == 0 && dx >= 0 && dx <= 1) { out_row[dx] = row[dx]; } // Evolve iterations if (dx > 1) { for (int iter = 0; iter < iters; iter++) { // Propagate row if (dx < cols) { out_row[dx] = ((1.9*row[dx-2]) + (1.5*row[dx-1]) + row[dx] + (0.5*row[(dx+1)%BLOCK_SIZE]) + (0.1*row[(dx+2)%BLOCK_SIZE])) / (float) 5; } __syncthreads(); // Copy over row if (dx < cols) row[dx] = out_row[dx]; if (tiles_per_row > 1 && dx == 0) { // Only barrier sync inter-block when there is more than 1 tile per row atomicAdd(&(iteration_counters[iter_counter_index]), 1); // Atomic incremenet iteration counter if (blockIdx.y == 0) { // Leftmost tile while (iteration_counters[iter_counter_index+1] < iter) { // Block until right neighbour in sync } } else if (blockIdx.y == tiles_per_row-1) { // Right most tile while (iteration_counters[iter_counter_index-1] < iter) { // Block until left neighbour in sync } } else { while (iteration_counters[iter_counter_index-1] < iter && iteration_counters[iter_counter_index+1] < iter) { // Block until left and right neighbours are in sync } } } __syncthreads(); } } // Store row: shared -> global if (dx < cols) mat[row_no*m + i*BLOCK_SIZE + dx] = row[dx]; } __global__ void row_avg(float* rowavg, float* mat1d, int n, int m) { int x = blockIdx.x; int y = threadIdx.y; if (y == 0 && x < n && x >= 0) { double sum = 0; for (int i = 0; i < m; i++) { sum += mat1d[x*m + i]; } double avg = sum / (double) m; rowavg[x] = (float) avg; } } // GPU specific util fns: for cuda error check and duration calc from cuda events void cuda_last_error_check (const char *message) { cudaError_t err = cudaGetLastError(); if(cudaSuccess != 
err) { printf("[CUDA] [ERROR] %s: %s\n", message, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } void set_duration(Timer* timer, cudaEvent_t* start, cudaEvent_t* stop) { float milliseconds = 0; cudaEventSynchronize(*stop); cudaEventElapsedTime(&milliseconds, *start, *stop); timer->duration_ms = milliseconds; }
59b8dd8cac003b27d7d08975e6581fa8f81649fc.hip
// !!! This is a file automatically generated by hipify!!! /* SorensonPar.cu Parallel Implementation of Algorithm 4.1 as discussed in Sorenson and Parberry's 1994 paper "Two Fast Parallel Prime Number Sieves". Authors: Daniel Anzaldo David Frank Antonio Lanfranchi */ // Visual Studio Dependencies (Can be commented out) #include "hip/hip_runtime.h" #include "device_launch_parameters.h" // C dependencies #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> // C++ dependencies #include <algorithm> typedef unsigned long long big; // GLOBAL VARIABLES-------------------------------------- typedef struct Wheel_t // Struct-of-Arrays Wheel { bool * rp; // Numbers relatively prime to m big * dist; // D s.t. x + d is the smallest integer >dist[x] relatively prime to m } Wheel_k; bool * S; // Global shared bit array of numbers up to N int P; // Global number of processors bool check_cuda_status = false; // turn to false when running on circe /* These are for tracking time */ struct timezone myTimezone; struct timeval startTime, endTime; // HOST FUNCTION HEADERS--------------------------------- /* gcd Host version of the Euclidean Method */ __host__ big gcd(big u, big v); /* EratosthenesSieve HELPER: for Algorithm 4.1 Sequential Portion The most basic form of generating primes. Used to help find the first k primes. Returns the k-th prime. */ big EratosthenesSieve(long double x); /* Algorithm 4.1 Sequential Portion Running Time: O(sqrt(n)) Space: O(sqrt(n)) up to O(sqrt(n)/log log n) */ hipError_t algorithm4_1(big n); /* Algorithm 4.1 Helper: Parallel Sieve All CUDA-related functionality goes here. This code will change for different kernel versions. 
*/ hipError_t parallelSieve( big n, big k, big m, const Wheel_k &wheel, big range); /* Frees the memory allocated on the device and returns any errors*/ hipError_t cleanup(bool *d_S, Wheel_k &wheel, hipError_t cudaStatus); /* Set a checkpoint and show the total running time in seconds */ double report_running_time(const char *arr); // DEVICE MATH FUNCTIONS--------------------------------- /* gcd_d Device version of the Euclidean Method find number c such that: a = sc, b = tc */ __device__ big gcd_d(big a, big b) { big tmp; while (b!=0) { tmp = a; a = b; b = tmp%b; } return a; } /* gcd_d Device version of the Binary Method with bit arithmetic */ /* __device__ big gcd_d(big u, big v) { big g = 1; while ((u % 2 == 0) && (v % 2 == 0)) { g <<= 1; u >>= 1; v >>= 1; } while (u != 0 && v != 0) if (u % 2 == 0) u >>= 1; else if (v % 2 == 0) v >>= 1; else if (u > v) u = (u - v) >> 1; else v = (v - u) >> 1; return (g * (u + v)); } */ /* sqrt_d Device version of the Square Root Function Babylonian Method */ __device__ big sqrt_d(big a) { big root = a/2; for (big n = 0; n < 10; n++) { root = 0.5 * (root + (a/root)); } return root; } __device__ big min_d(big a, big b) { return (a < b) ? a : b; } __device__ big max_d(big a, big b) { return (a > b) ? a : b; } // ALGORITHM 4.1 KERNEL VERSIONS------------------------- /* Algorithm 4.1: Parallel Sieve Kernel version 1 Parallelization: O(sqrt(n)) processors Space: O(sqrt(n)) up to O(sqrt(n)/log log n) PRAM Mode: Exclusive Read, Exclusive Write (EREW) Remarks: No optimizations yet performed. For n = 1 billion, it uses 31623 threads */ __global__ void parallelSieveKernel( big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S) { big sqrt_N = sqrt_d(n); // Express the sieve in thread mode. big i = threadIdx.x + blockIdx.x * blockDim.x; // Threads beyond n will not do work. 
if (i <= n) { big L = range * i + 1; big R = min_d(range * (i + 1), n); /* Range Sieving */ for (big x = L; x < R; x++) d_S[x] = d_wheel.rp[x % m]; /* For every prime from prime[k] up to sqrt(N) */ for (big q = k; q < sqrt_N; q++) { if (d_S[q]) { /* Compute smallest f s.t. gcd_d(qf, m) == 1, qf >= max_d(L, q^2) */ big f = max_d(q - 1, (big)((L / q) - 1)); /* f = f + W_k[f mod m].dist */ f += d_wheel.dist[f % m]; /* Remove the multiples of current prime */ while ((q * f) <= R) { // EREW Precaution. May need to be atomic operation. if (!(d_S[q * f])) d_S[q * f] = false; f += d_wheel.dist[f % m]; } } } } } /* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2 Remarks: Prime table S within [0, sqrt(n)] migrated to const memory Wheel completely migrated to const memory Beware that const memory is only 64kB. Benchmark with the Profiler first before creating this! */ __global__ void parallelSieveKernel2( big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S); /* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3 Remarks: Prime table S within [0, sqrt(n)] migrated to const memory Wheel completely migrated to const memory Probable use of the shared memory Probable use of registers Beware that register is only 4B or 32b. Beware that const memory is only 64kB. Benchmark with the Profiler first before creating this! */ __global__ void parallelSieveKernel3( big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S); /* MAIN To run this add the ff. args: 1. N = the number up to which you're sieving */ int main(int argc, char **argv) { big N = (big)strtoull(argv[1], NULL, 10); S = new bool[N]; //(bool*)malloc(N * sizeof(bool)); printf("Find primes up to: %llu\n\n", N); /* start counting time */ gettimeofday(&startTime, &myTimezone); hipError_t x = algorithm4_1(N); /* check the total running time */ report_running_time("Algorithm 4.1"); if (check_cuda_status) { if (x != hipSuccess) { printf("Algorithm 4.1 failed to execute!"); return 1; } } // Display the primes. 
for (int i = 0; i < N; i++) if (S[i]) printf("%llu ", i); delete[] S; return 0; } // HOST FUNCTION DEFINITIONS----------------------------- // Euclidean Method __host__ big gcd(big u, big v) { big tmp; while (v != 0) { tmp = u; u = v; v = tmp%v; } return u; } // Binary Method /* __host__ big gcd(big u, big v) { big g = 1; while ((u % 2 == 0) && (v % 2 == 0)) { g <<= 1; u >>= 1; v >>= 1; } while (u != 0 && v != 0) if (u % 2 == 0) u >>= 1; else if (v % 2 == 0) v >>= 1; else if (u > v) u = (u - v) >> 1; else v = (v - u) >> 1; return (g * (u + v)); } */ big EratosthenesSieve(long double k, big n) { big kthPrime = 0; // 0 and 1 are non-primes. S[0] = S[1] = false; for (big i = 2; i < n; i++) S[i] = true; // Simple Sieving Operation. for (big i = 2; i < (big)sqrtl(n); i++) if (S[i]) { int j; for (j = i*i; j < n; j += i) S[j] = false; } // Find the k-th prime. for (big i = k; i > 2; i--) if (S[i]) kthPrime = i; return kthPrime; } hipError_t algorithm4_1(big n) { /* VARIABLES */ big range; big sqrt_N = (big)sqrtl((long double)n); Wheel_k wheel; /* Allocation of wheel */ wheel.rp = new bool[n]; wheel.dist = new big[n]; /* Find the first k primes K = maximal s.t. S[K] <= (log N) / 4 Find primes up to sqrt(N) */ big k = EratosthenesSieve(log10l((long double)n) / 4, n); /* Find the product of the first k primes m */ big m = 1; for (big ii = 0; ii < k; ii++) if (S[ii]) m *= ii; /* Compute k-th wheel W_k FUTURE OPTIMIZATION: Delegate kernel for computation */ for (big x = 0; x < n; x++) { // True if rp[x] is relatively prime to m wheel.rp[x] = (gcd(x, m) == 1); /* This is d s.t. 
x + d is the smallest integer >dist[x] relatively prime to m */ int d = 0; while (gcd(x + d, m) != 1) d++; wheel.dist[x] = d; } /* Delta = ceil(n/p) */ range = (big)ceill(n / (long double)P); /* PARALLEL PART */ hipError_t parallelStatus = parallelSieve(n, k, m, wheel, range); if (check_cuda_status) { if (parallelStatus != hipSuccess) { fprintf(stderr, "parallelSieve() failed!"); } } /* FREE */ delete[] wheel.rp; delete[] wheel.dist; return parallelStatus; } hipError_t parallelSieve( big n, big k, big m, const Wheel_k &wheel, big range) { hipError_t cudaStatus; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /* The Number Field S will be migrated to GLOBAL memory OPTIMIZATION: ranges will be migrated to SHARED memory OPTIMIZATION: [0, sqrt(n)] will be migrated to CONSTANT memory */ bool * d_S = NULL; // The Wheel Precomputed Table // will be migrated to GLOBAL memory // OPTIMIZATION: may be migrated to CONSTANT memory as well Wheel_k d_wheel; d_wheel.rp = NULL; d_wheel.dist = NULL; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n"); return cudaStatus; } } // Measure start time for CUDA portion hipEventRecord(start, 0); // CUDA Memory Allocations. 
cudaStatus = hipMalloc((void**)&d_S, n * sizeof(bool)); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed on number field S!\n"); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = hipMalloc((void**)&(d_wheel.rp), n * sizeof(bool)); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed on wheel.rp!\n"); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = hipMalloc((void**)&(d_wheel.dist), n * sizeof(big)); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed on wheel.dist!\n"); return cleanup(d_S, d_wheel, cudaStatus); } } // cudaMemCpy -> Device cudaStatus = hipMemcpy(d_S, S, n * sizeof(bool), hipMemcpyHostToDevice); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! S->d_S.\n"); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = hipMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), hipMemcpyHostToDevice); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! wheel.rp->d_wheel.rp\n"); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = hipMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), hipMemcpyHostToDevice); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! 
wheel.dist->d_wheel.dist\n"); return cleanup(d_S, d_wheel, cudaStatus); } } // Kernel Call dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1); dim3 blockSize(256, 1, 1); hipLaunchKernelGGL(( parallelSieveKernel), dim3(gridSize), dim3(blockSize), 0, 0, n, k, m, wheel, range, d_S); cudaStatus = hipGetLastError(); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "parallelSieveKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = hipDeviceSynchronize(); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); return cleanup(d_S, d_wheel, cudaStatus); } } // cudaMemCpy -> Host cudaStatus = hipMemcpy(S, d_S, n * sizeof(bool), hipMemcpyDeviceToHost); if (check_cuda_status) { if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed! d_S->S.\n"); return cleanup(d_S, d_wheel, cudaStatus); } } // Measure stop time for CUDA portion hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Time to generate: %0.5f ms\n", elapsedTime); // hipFree return cleanup(d_S, d_wheel, cudaStatus); } hipError_t cleanup(bool *d_S, Wheel_k &wheel, hipError_t cudaStatus) { hipFree(d_S); hipFree(wheel.rp); hipFree(wheel.dist); return cudaStatus; } /* set a checkpoint and show the (natural) running time in seconds */ double report_running_time(const char *arr) { long sec_diff, usec_diff; gettimeofday(&endTime, &myTimezone); sec_diff = endTime.tv_sec - startTime.tv_sec; usec_diff= endTime.tv_usec-startTime.tv_usec; if(usec_diff < 0) { sec_diff --; usec_diff += 1000000; } printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff); return (double)(sec_diff*1.0 + usec_diff/1000000.0); }
59b8dd8cac003b27d7d08975e6581fa8f81649fc.cu
/* SorensonPar.cu Parallel Implementation of Algorithm 4.1 as discussed in Sorenson and Parberry's 1994 paper "Two Fast Parallel Prime Number Sieves". Authors: Daniel Anzaldo David Frank Antonio Lanfranchi */ // Visual Studio Dependencies (Can be commented out) #include "cuda_runtime.h" #include "device_launch_parameters.h" // C dependencies #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> // C++ dependencies #include <algorithm> typedef unsigned long long big; // GLOBAL VARIABLES-------------------------------------- typedef struct Wheel_t // Struct-of-Arrays Wheel { bool * rp; // Numbers relatively prime to m big * dist; // D s.t. x + d is the smallest integer >dist[x] relatively prime to m } Wheel_k; bool * S; // Global shared bit array of numbers up to N int P; // Global number of processors bool check_cuda_status = false; // turn to false when running on circe /* These are for tracking time */ struct timezone myTimezone; struct timeval startTime, endTime; // HOST FUNCTION HEADERS--------------------------------- /* gcd Host version of the Euclidean Method */ __host__ big gcd(big u, big v); /* EratosthenesSieve HELPER: for Algorithm 4.1 Sequential Portion The most basic form of generating primes. Used to help find the first k primes. Returns the k-th prime. */ big EratosthenesSieve(long double x); /* Algorithm 4.1 Sequential Portion Running Time: O(sqrt(n)) Space: O(sqrt(n)) up to O(sqrt(n)/log log n) */ cudaError_t algorithm4_1(big n); /* Algorithm 4.1 Helper: Parallel Sieve All CUDA-related functionality goes here. This code will change for different kernel versions. 
*/ cudaError_t parallelSieve( big n, big k, big m, const Wheel_k &wheel, big range); /* Frees the memory allocated on the device and returns any errors*/ cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus); /* Set a checkpoint and show the total running time in seconds */ double report_running_time(const char *arr); // DEVICE MATH FUNCTIONS--------------------------------- /* gcd_d Device version of the Euclidean Method find number c such that: a = sc, b = tc */ __device__ big gcd_d(big a, big b) { big tmp; while (b!=0) { tmp = a; a = b; b = tmp%b; } return a; } /* gcd_d Device version of the Binary Method with bit arithmetic */ /* __device__ big gcd_d(big u, big v) { big g = 1; while ((u % 2 == 0) && (v % 2 == 0)) { g <<= 1; u >>= 1; v >>= 1; } while (u != 0 && v != 0) if (u % 2 == 0) u >>= 1; else if (v % 2 == 0) v >>= 1; else if (u > v) u = (u - v) >> 1; else v = (v - u) >> 1; return (g * (u + v)); } */ /* sqrt_d Device version of the Square Root Function Babylonian Method */ __device__ big sqrt_d(big a) { big root = a/2; for (big n = 0; n < 10; n++) { root = 0.5 * (root + (a/root)); } return root; } __device__ big min_d(big a, big b) { return (a < b) ? a : b; } __device__ big max_d(big a, big b) { return (a > b) ? a : b; } // ALGORITHM 4.1 KERNEL VERSIONS------------------------- /* Algorithm 4.1: Parallel Sieve Kernel version 1 Parallelization: O(sqrt(n)) processors Space: O(sqrt(n)) up to O(sqrt(n)/log log n) PRAM Mode: Exclusive Read, Exclusive Write (EREW) Remarks: No optimizations yet performed. For n = 1 billion, it uses 31623 threads */ __global__ void parallelSieveKernel( big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S) { big sqrt_N = sqrt_d(n); // Express the sieve in thread mode. big i = threadIdx.x + blockIdx.x * blockDim.x; // Threads beyond n will not do work. 
if (i <= n) { big L = range * i + 1; big R = min_d(range * (i + 1), n); /* Range Sieving */ for (big x = L; x < R; x++) d_S[x] = d_wheel.rp[x % m]; /* For every prime from prime[k] up to sqrt(N) */ for (big q = k; q < sqrt_N; q++) { if (d_S[q]) { /* Compute smallest f s.t. gcd_d(qf, m) == 1, qf >= max_d(L, q^2) */ big f = max_d(q - 1, (big)((L / q) - 1)); /* f = f + W_k[f mod m].dist */ f += d_wheel.dist[f % m]; /* Remove the multiples of current prime */ while ((q * f) <= R) { // EREW Precaution. May need to be atomic operation. if (!(d_S[q * f])) d_S[q * f] = false; f += d_wheel.dist[f % m]; } } } } } /* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2 Remarks: Prime table S within [0, sqrt(n)] migrated to const memory Wheel completely migrated to const memory Beware that const memory is only 64kB. Benchmark with the Profiler first before creating this! */ __global__ void parallelSieveKernel2( big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S); /* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3 Remarks: Prime table S within [0, sqrt(n)] migrated to const memory Wheel completely migrated to const memory Probable use of the shared memory Probable use of registers Beware that register is only 4B or 32b. Beware that const memory is only 64kB. Benchmark with the Profiler first before creating this! */ __global__ void parallelSieveKernel3( big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S); /* MAIN To run this add the ff. args: 1. N = the number up to which you're sieving */ int main(int argc, char **argv) { big N = (big)strtoull(argv[1], NULL, 10); S = new bool[N]; //(bool*)malloc(N * sizeof(bool)); printf("Find primes up to: %llu\n\n", N); /* start counting time */ gettimeofday(&startTime, &myTimezone); cudaError_t x = algorithm4_1(N); /* check the total running time */ report_running_time("Algorithm 4.1"); if (check_cuda_status) { if (x != cudaSuccess) { printf("Algorithm 4.1 failed to execute!"); return 1; } } // Display the primes. 
for (int i = 0; i < N; i++) if (S[i]) printf("%llu ", i); delete[] S; return 0; } // HOST FUNCTION DEFINITIONS----------------------------- // Euclidean Method __host__ big gcd(big u, big v) { big tmp; while (v != 0) { tmp = u; u = v; v = tmp%v; } return u; } // Binary Method /* __host__ big gcd(big u, big v) { big g = 1; while ((u % 2 == 0) && (v % 2 == 0)) { g <<= 1; u >>= 1; v >>= 1; } while (u != 0 && v != 0) if (u % 2 == 0) u >>= 1; else if (v % 2 == 0) v >>= 1; else if (u > v) u = (u - v) >> 1; else v = (v - u) >> 1; return (g * (u + v)); } */ big EratosthenesSieve(long double k, big n) { big kthPrime = 0; // 0 and 1 are non-primes. S[0] = S[1] = false; for (big i = 2; i < n; i++) S[i] = true; // Simple Sieving Operation. for (big i = 2; i < (big)sqrtl(n); i++) if (S[i]) { int j; for (j = i*i; j < n; j += i) S[j] = false; } // Find the k-th prime. for (big i = k; i > 2; i--) if (S[i]) kthPrime = i; return kthPrime; } cudaError_t algorithm4_1(big n) { /* VARIABLES */ big range; big sqrt_N = (big)sqrtl((long double)n); Wheel_k wheel; /* Allocation of wheel */ wheel.rp = new bool[n]; wheel.dist = new big[n]; /* Find the first k primes K = maximal s.t. S[K] <= (log N) / 4 Find primes up to sqrt(N) */ big k = EratosthenesSieve(log10l((long double)n) / 4, n); /* Find the product of the first k primes m */ big m = 1; for (big ii = 0; ii < k; ii++) if (S[ii]) m *= ii; /* Compute k-th wheel W_k FUTURE OPTIMIZATION: Delegate kernel for computation */ for (big x = 0; x < n; x++) { // True if rp[x] is relatively prime to m wheel.rp[x] = (gcd(x, m) == 1); /* This is d s.t. 
x + d is the smallest integer >dist[x] relatively prime to m */ int d = 0; while (gcd(x + d, m) != 1) d++; wheel.dist[x] = d; } /* Delta = ceil(n/p) */ range = (big)ceill(n / (long double)P); /* PARALLEL PART */ cudaError_t parallelStatus = parallelSieve(n, k, m, wheel, range); if (check_cuda_status) { if (parallelStatus != cudaSuccess) { fprintf(stderr, "parallelSieve() failed!"); } } /* FREE */ delete[] wheel.rp; delete[] wheel.dist; return parallelStatus; } cudaError_t parallelSieve( big n, big k, big m, const Wheel_k &wheel, big range) { cudaError_t cudaStatus; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); /* The Number Field S will be migrated to GLOBAL memory OPTIMIZATION: ranges will be migrated to SHARED memory OPTIMIZATION: [0, sqrt(n)] will be migrated to CONSTANT memory */ bool * d_S = NULL; // The Wheel Precomputed Table // will be migrated to GLOBAL memory // OPTIMIZATION: may be migrated to CONSTANT memory as well Wheel_k d_wheel; d_wheel.rp = NULL; d_wheel.dist = NULL; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n"); return cudaStatus; } } // Measure start time for CUDA portion cudaEventRecord(start, 0); // CUDA Memory Allocations. 
cudaStatus = cudaMalloc((void**)&d_S, n * sizeof(bool)); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed on number field S!\n"); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = cudaMalloc((void**)&(d_wheel.rp), n * sizeof(bool)); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed on wheel.rp!\n"); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = cudaMalloc((void**)&(d_wheel.dist), n * sizeof(big)); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed on wheel.dist!\n"); return cleanup(d_S, d_wheel, cudaStatus); } } // cudaMemCpy -> Device cudaStatus = cudaMemcpy(d_S, S, n * sizeof(bool), cudaMemcpyHostToDevice); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! S->d_S.\n"); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = cudaMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), cudaMemcpyHostToDevice); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! wheel.rp->d_wheel.rp\n"); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = cudaMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), cudaMemcpyHostToDevice); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! 
wheel.dist->d_wheel.dist\n"); return cleanup(d_S, d_wheel, cudaStatus); } } // Kernel Call dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1); dim3 blockSize(256, 1, 1); parallelSieveKernel<<<gridSize, blockSize>>>(n, k, m, wheel, range, d_S); cudaStatus = cudaGetLastError(); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "parallelSieveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); return cleanup(d_S, d_wheel, cudaStatus); } } cudaStatus = cudaDeviceSynchronize(); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); return cleanup(d_S, d_wheel, cudaStatus); } } // cudaMemCpy -> Host cudaStatus = cudaMemcpy(S, d_S, n * sizeof(bool), cudaMemcpyDeviceToHost); if (check_cuda_status) { if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed! d_S->S.\n"); return cleanup(d_S, d_wheel, cudaStatus); } } // Measure stop time for CUDA portion cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("Time to generate: %0.5f ms\n", elapsedTime); // cudaFree return cleanup(d_S, d_wheel, cudaStatus); } cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus) { cudaFree(d_S); cudaFree(wheel.rp); cudaFree(wheel.dist); return cudaStatus; } /* set a checkpoint and show the (natural) running time in seconds */ double report_running_time(const char *arr) { long sec_diff, usec_diff; gettimeofday(&endTime, &myTimezone); sec_diff = endTime.tv_sec - startTime.tv_sec; usec_diff= endTime.tv_usec-startTime.tv_usec; if(usec_diff < 0) { sec_diff --; usec_diff += 1000000; } printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff); return (double)(sec_diff*1.0 + usec_diff/1000000.0); }
3f43e59ca6385158fab037d1251ff32478f7a172.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Mark Gates @author Azzam Haidar @author Ichitaro Yamazaki @precisions normal z -> s d c */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void zlacpy_sym_in_full_device( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /******************************************************************************/ /* Similar to zlacpy_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. 
*/ static __device__ void zlacpy_sym_in_lower_device( int m, int n, magma_int_t *rows, magma_int_t *perm, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; // row int iby = blockIdx.y*BLK_Y; // col /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n); for (int jj=0; jj < n; jj++) { perm[rows[2*jj+1]] = rows[2*jj]; } /* do only rows inside matrix, and blocks not above diag */ if ( ind < m ) { if ( full ) { // full block-column, off-diagonal block //#pragma unroll for( int jj=0; jj < BLK_Y; ++jj ) { int j = rows[2*(iby+jj)]; if (perm[ind] <= j) dB[ind + (iby+jj)*lddb] = MAGMA_Z_CONJ( dA[j + perm[ind]*ldda] ); else dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda]; } } else { // either partial block-column or diagonal block for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) { int j = rows[2*(iby+jj)]; if (perm[ind] <= j) dB[ind + (iby+jj)*lddb] = MAGMA_Z_CONJ( dA[j + perm[ind]*ldda] ); else dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda]; } } } } /* Similar to zlacpy_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. 
*/ static __device__ void zlacpy_sym_in_upper_device( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { dB[j*lddb] = dA[j*ldda]; } } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void zlacpy_sym_in_full_kernel( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_in_full_device(m, n, dA, ldda, dB, lddb); } __global__ void zlacpy_sym_in_lower_kernel( int m, int n, magma_int_t *rows, magma_int_t *perm, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_in_lower_device(m, n, rows, perm, dA, ldda, dB, lddb); } __global__ void zlacpy_sym_in_upper_kernel( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_in_upper_device(m, n, dA, ldda, dB, lddb); } /***************************************************************************//** Purpose ------- ZLACPY_SYM_IN copies all or part of a two-dimensional matrix dA to another matrix dB. This is the same as ZLACPY, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be copied to dB. 
- = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of rows that are swapped. N >= 0. @param[in] rows INTEGER array, on GPU, dimension (2*n) On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th rows are swapped. @param[in,out] perm INTEGER array, on GPU, dimension (m) On entry, it stores the identity permutation array. On exit, it is updated with the new pivots given by rows such that i-th row will be the original perm[i]-th row after the pivots are applied. @param[in] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[out] dB COMPLEX_16 array, dimension (LDDB,N) On exit, dB = stores the columns after the pivots are applied. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lacpy *******************************************************************************/ extern "C" void magmablas_zlacpy_sym_in( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t *rows, magma_int_t *perm, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) ); if ( uplo == MagmaLower ) { hipLaunchKernelGGL(( zlacpy_sym_in_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, rows, perm, dA, ldda, dB, lddb ); } else if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( zlacpy_sym_in_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb ); } else { hipLaunchKernelGGL(( zlacpy_sym_in_full_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, dA, ldda, dB, lddb ); } }
3f43e59ca6385158fab037d1251ff32478f7a172.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @author Mark Gates @author Azzam Haidar @author Ichitaro Yamazaki @precisions normal z -> s d c */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /******************************************************************************/ /* Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd. */ static __device__ void zlacpy_sym_in_full_device( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column */ bool full = (iby + BLK_Y <= n); /* do only rows inside matrix */ if ( ind < m ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // partial block-column for( int j=0; j < BLK_Y && iby+j < n; ++j ) { dB[j*lddb] = dA[j*ldda]; } } } } /******************************************************************************/ /* Similar to zlacpy_full, but updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. 
*/ static __device__ void zlacpy_sym_in_lower_device( int m, int n, magma_int_t *rows, magma_int_t *perm, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; // row int iby = blockIdx.y*BLK_Y; // col /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n); for (int jj=0; jj < n; jj++) { perm[rows[2*jj+1]] = rows[2*jj]; } /* do only rows inside matrix, and blocks not above diag */ if ( ind < m ) { if ( full ) { // full block-column, off-diagonal block //#pragma unroll for( int jj=0; jj < BLK_Y; ++jj ) { int j = rows[2*(iby+jj)]; if (perm[ind] <= j) dB[ind + (iby+jj)*lddb] = MAGMA_Z_CONJ( dA[j + perm[ind]*ldda] ); else dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda]; } } else { // either partial block-column or diagonal block for( int jj=0; jj < BLK_Y && iby+jj < n; ++jj ) { int j = rows[2*(iby+jj)]; if (perm[ind] <= j) dB[ind + (iby+jj)*lddb] = MAGMA_Z_CONJ( dA[j + perm[ind]*ldda] ); else dB[ind + (iby+jj)*lddb] = dA[perm[ind] + j*ldda]; } } } } /* Similar to zlacpy_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlaset, zlacpy, zlat2c, clat2z. 
*/ static __device__ void zlacpy_sym_in_upper_device( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < m && ind < iby + BLK_Y ) { dA += ind + iby*ldda; dB += ind + iby*lddb; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { dB[j*lddb] = dA[j*ldda]; } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { dB[j*lddb] = dA[j*ldda]; } } } } } /******************************************************************************/ /* kernel wrappers to call the device functions. */ __global__ void zlacpy_sym_in_full_kernel( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_in_full_device(m, n, dA, ldda, dB, lddb); } __global__ void zlacpy_sym_in_lower_kernel( int m, int n, magma_int_t *rows, magma_int_t *perm, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_in_lower_device(m, n, rows, perm, dA, ldda, dB, lddb); } __global__ void zlacpy_sym_in_upper_kernel( int m, int n, const magmaDoubleComplex *dA, int ldda, magmaDoubleComplex *dB, int lddb ) { zlacpy_sym_in_upper_device(m, n, dA, ldda, dB, lddb); } /***************************************************************************//** Purpose ------- ZLACPY_SYM_IN copies all or part of a two-dimensional matrix dA to another matrix dB. This is the same as ZLACPY, but adds queue argument. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA to be copied to dB. 
- = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part - = MagmaFull: All of the matrix dA @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of rows that are swapped. N >= 0. @param[in] rows INTEGER array, on GPU, dimension (2*n) On entry, it stores the new pivots such that rows[i]-th and rows[n+i]-th rows are swapped. @param[in,out] perm INTEGER array, on GPU, dimension (m) On entry, it stores the identity permutation array. On exit, it is updated with the new pivots given by rows such that i-th row will be the original perm[i]-th row after the pivots are applied. @param[in] dA COMPLEX_16 array, dimension (LDDA,N) The M-by-N matrix dA. If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed; if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). @param[out] dB COMPLEX_16 array, dimension (LDDB,N) On exit, dB = stores the columns after the pivots are applied. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= max(1,M). @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lacpy *******************************************************************************/ extern "C" void magmablas_zlacpy_sym_in( magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t *rows, magma_int_t *perm, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr dB, magma_int_t lddb, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } if ( m == 0 || n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv(m, BLK_X), magma_ceildiv(n, BLK_Y) ); if ( uplo == MagmaLower ) { zlacpy_sym_in_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, rows, perm, dA, ldda, dB, lddb ); } else if ( uplo == MagmaUpper ) { zlacpy_sym_in_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); } else { zlacpy_sym_in_full_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, n, dA, ldda, dB, lddb ); } }
a3b7873e7a44be553455e76088765bb20c8fc348.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> __global__ void call_min(double* first, const double* second) { first[threadIdx.x] = std::fmin(first[threadIdx.x], second[threadIdx.x]); }
a3b7873e7a44be553455e76088765bb20c8fc348.cu
#include <cmath> __global__ void call_min(double* first, const double* second) { first[threadIdx.x] = std::fmin(first[threadIdx.x], second[threadIdx.x]); }
13a5ae168663ac2399d5fc76f6a25044b2fd774b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void vector_add(float *c, float *a, float *b, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] + b[i]; } }
13a5ae168663ac2399d5fc76f6a25044b2fd774b.cu
extern "C" __global__ void vector_add(float *c, float *a, float *b, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] + b[i]; } }
ef380ed5bb40936f3adfcc5ff2c5dfad88b95a3d.hip
// !!! This is a file automatically generated by hipify!!! // // Created by shijiashuai on 5/7/18. // #include "thundergbm/updater/exact_updater.h" void ExactUpdater::lsh_hash_init(unsigned n_bucket, unsigned num_table, unsigned num_dimension, unsigned p_norm, float r, unsigned numP, int seed){ lsh_table.param.n_bucket = n_bucket; lsh_table.param.n_table = num_table; lsh_table.param.n_dimension = num_dimension; lsh_table.param.p_norm = p_norm; lsh_table.param.r = r; lsh_table.param.n_comp = numP; lsh_table.param.seed = seed; lsh_table.init(); } void ExactUpdater::grow(Tree &tree, const vector<std::shared_ptr<SparseColumns>> &v_columns, InsStat &stats) { TIMED_SCOPE(timerObj, "grow tree"); int n_instances = stats.n_instances; int cur_device = 0; // int cur_device = param.use_gpu_id; LOG(TRACE) << "broadcast tree and stats"; v_stats.resize(n_devices); v_trees_gpu.resize(n_devices); init_tree(tree, stats); DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { //stats int n_instances = stats.n_instances; v_stats[device_id].reset(new InsStat()); InsStat &gpu_stats = *v_stats[device_id]; gpu_stats.resize(n_instances); gpu_stats.gh_pair.copy_from(stats.gh_pair.host_data(), n_instances); gpu_stats.nid.copy_from(stats.nid.host_data(), n_instances); gpu_stats.y.copy_from(stats.y.host_data(), n_instances); gpu_stats.y_predict.copy_from(stats.y_predict.host_data(), n_instances); //tree v_trees_gpu[device_id].reset(new Tree()); Tree &gpu_tree = *v_trees_gpu[device_id]; gpu_tree.nodes.resize(tree.nodes.size()); gpu_tree.nodes.copy_from(tree.nodes.host_data(), tree.nodes.size()); }); for (int i = 0; i < depth; ++i) { LOG(TRACE) << "growing tree at depth " << i; vector<SyncArray<SplitPoint>> local_sp(n_devices); { TIMED_SCOPE(timerObj, "find split"); DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { LOG(TRACE) << string_format("finding split on device %d", device_id); find_split(i, *v_columns[device_id], *v_trees_gpu[device_id], *v_stats[device_id], local_sp[device_id]); }); } int 
n_max_nodes_in_level = 1 << i;//2^i int nid_offset = (1 << i) - 1;//2^i - 1 SyncArray<SplitPoint> global_sp(n_max_nodes_in_level); { TIMED_SCOPE(timerObj, "split point all reduce"); split_point_all_reduce(local_sp, global_sp, i); } //do split { TIMED_SCOPE(timerObj, "update tree"); update_tree(tree, global_sp); } //broadcast tree LOG(TRACE) << "broadcasting updated tree"; DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { v_trees_gpu[device_id]->nodes.copy_from(tree.nodes.host_data(), tree.nodes.size()); }); { vector<bool> v_has_split(n_devices); TIMED_SCOPE(timerObj, "reset ins2node id"); LOG(TRACE) << "reset ins2node id"; DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { v_has_split[device_id] = reset_ins2node_id(*v_stats[device_id], *v_trees_gpu[device_id], *v_columns[device_id]); }); LOG(TRACE) << "gathering ins2node id"; //get final result of the reset instance id to node id bool has_split = false; for (int d = 0; d < n_devices; d++) { has_split |= v_has_split[d]; } if (!has_split) { LOG(INFO) << "no splittable nodes, stop"; break; } } //get global ins2node id { TIMED_SCOPE(timerObj, "global ins2node id"); SyncArray<int> local_ins2node_id(n_instances); auto local_ins2node_id_data = local_ins2node_id.device_data(); auto global_ins2node_id_data = stats.nid.device_data(); for (int d = 0; d < n_devices; d++) { CUDA_CHECK(hipMemcpyPeerAsync(local_ins2node_id_data, cur_device, v_stats[d]->nid.device_data(), d, sizeof(int) * n_instances)); hipDeviceSynchronize(); device_loop(n_instances, [=]__device__(int i) { global_ins2node_id_data[i] = (global_ins2node_id_data[i] > local_ins2node_id_data[i]) ? 
global_ins2node_id_data[i] : local_ins2node_id_data[i]; }); } } //processing missing value { TIMED_SCOPE(timerObj, "process missing value"); LOG(TRACE) << "update ins2node id for each missing fval"; auto global_ins2node_id_data = stats.nid.device_data();//essential auto nodes_data = v_trees_gpu[0]->nodes.device_data();//already broadcast above device_loop(n_instances, [=]__device__(int iid) { int nid = global_ins2node_id_data[iid]; //if the instance is not on leaf node and not goes down if (nodes_data[nid].splittable() && nid < nid_offset + n_max_nodes_in_level) { //let the instance goes down const Tree::TreeNode &node = nodes_data[nid]; if (node.default_right) global_ins2node_id_data[iid] = node.rch_index; else global_ins2node_id_data[iid] = node.lch_index; } }); LOG(DEBUG) << "new nid = " << stats.nid; //broadcast ins2node id DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { v_stats[device_id]->nid.copy_from(stats.nid.host_data(), stats.nid.size()); }); } } tree.nodes.copy_from(v_trees_gpu[0]->nodes); } void ExactUpdater::split_point_all_reduce(const vector<SyncArray<SplitPoint>> &local_sp, SyncArray<SplitPoint> &global_sp, int depth) { //get global best split of each node int n_max_nodes_in_level = 1 << depth;//2^i int nid_offset = (1 << depth) - 1;//2^i - 1 auto global_sp_data = global_sp.host_data(); vector<bool> active_sp(n_max_nodes_in_level); for (int n = 0; n < n_max_nodes_in_level; n++) { global_sp_data[n].nid = n + nid_offset; global_sp_data[n].gain = -1.0f; active_sp[n] = false; } for (int device_id = 0; device_id < n_devices; device_id++) { auto local_sp_data = local_sp[device_id].host_data(); for (int j = 0; j < local_sp[device_id].size(); j++) { int sp_nid = local_sp_data[j].nid; if (sp_nid == -1) continue; int global_pos = sp_nid - nid_offset; if (!active_sp[global_pos]) global_sp_data[global_pos] = local_sp_data[j]; else global_sp_data[global_pos] = (global_sp_data[global_pos].gain >= local_sp_data[j].gain) ? 
global_sp_data[global_pos] : local_sp_data[j]; active_sp[global_pos] = true; } } //set inactive sp for (int n = 0; n < n_max_nodes_in_level; n++) { if (!active_sp[n]) global_sp_data[n].nid = -1; } LOG(DEBUG) << "global best split point = " << global_sp; } void ExactUpdater::init_tree(Tree &tree, const InsStat &stats) { tree.init(depth); //init root node Tree::TreeNode &root_node = tree.nodes.host_data()[0]; root_node.sum_gh_pair = stats.sum_gh; root_node.is_valid = true; root_node.calc_weight(lambda); LOG(DEBUG) << "root sum gh " << root_node.sum_gh_pair; } void ExactUpdater::find_split(int level, const SparseColumns &columns, const Tree &tree, const InsStat &stats, SyncArray<SplitPoint> &sp) { int n_max_nodes_in_level = static_cast<int>(pow(2, level)); int nid_offset = static_cast<int>(pow(2, level) - 1); int n_column = columns.n_column; int n_partition = n_column * n_max_nodes_in_level; int nnz = columns.nnz; int n_block = ::min((nnz / n_column - 1) / 256 + 1, 32 * 56); LOG(TRACE) << "start finding split"; //find the best split locally { using namespace thrust; SyncArray<int> fvid2pid(nnz); { TIMED_SCOPE(timerObj, "fvid2pid"); //input const int *nid_data = stats.nid.device_data(); const int *iid_data = columns.csc_row_ind.device_data(); LOG(TRACE) << "after using v_stats and columns"; //output int *fvid2pid_data = fvid2pid.device_data(); device_loop_2d( n_column, columns.csc_col_ptr.device_data(), [=]__device__(int col_id, int fvid) { //feature value id -> instance id -> node id int nid = nid_data[iid_data[fvid]]; int pid; //if this node is leaf node, move it to the end if (nid < nid_offset) pid = INT_MAX;//todo negative else pid = (nid - nid_offset) * n_column + col_id; fvid2pid_data[fvid] = pid; }, n_block); LOG(DEBUG) << "fvid2pid " << fvid2pid; } //gather g/h pairs and do prefix sum int n_split; SyncArray<GHPair> gh_prefix_sum; SyncArray<GHPair> missing_gh(n_partition); SyncArray<int> rle_pid; SyncArray<float_type> rle_fval; { //get feature value id mapping 
for partition, new -> old SyncArray<int> fvid_new2old(nnz); { TIMED_SCOPE(timerObj, "fvid_new2old"); sequence(cuda::par, fvid_new2old.device_data(), fvid_new2old.device_end(), 0); stable_sort_by_key( cuda::par, fvid2pid.device_data(), fvid2pid.device_end(), fvid_new2old.device_data(), thrust::less<int>()); LOG(DEBUG) << "sorted fvid2pid " << fvid2pid; LOG(DEBUG) << "fvid_new2old " << fvid_new2old; } //do prefix sum { TIMED_SCOPE(timerObj, "do prefix sum"); SyncArray<GHPair> rle_gh(nnz); SyncArray<int_float> rle_key(nnz); //same feature value in the same part has the same key. auto key_iter = make_zip_iterator( make_tuple( fvid2pid.device_data(), make_permutation_iterator( columns.csc_val.device_data(), fvid_new2old.device_data())));//use fvid_new2old to access csc_val //apply RLE compression n_split = reduce_by_key( cuda::par, key_iter, key_iter + nnz, make_permutation_iterator( //ins id -> gh pair stats.gh_pair.device_data(), make_permutation_iterator( //old fvid -> ins id columns.csc_row_ind.device_data(), fvid_new2old.device_data())), //new fvid -> old fvid rle_key.device_data(), rle_gh.device_data() ).first - rle_key.device_data(); gh_prefix_sum.resize(n_split); rle_pid.resize(n_split); rle_fval.resize(n_split); const auto rle_gh_data = rle_gh.device_data(); const auto rle_key_data = rle_key.device_data(); auto gh_prefix_sum_data = gh_prefix_sum.device_data(); auto rle_pid_data = rle_pid.device_data(); auto rle_fval_data = rle_fval.device_data(); device_loop(n_split, [=]__device__(int i) { gh_prefix_sum_data[i] = rle_gh_data[i]; rle_pid_data[i] = get<0>(rle_key_data[i]); rle_fval_data[i] = get<1>(rle_key_data[i]); }); inclusive_scan_by_key( cuda::par, rle_pid.device_data(), rle_pid.device_end(), gh_prefix_sum.device_data(), gh_prefix_sum.device_data()); // LOG(DEBUG) << "gh prefix sum = " << gh_prefix_sum; LOG(DEBUG) << "reduced pid = " << rle_pid; LOG(DEBUG) << "reduced fval = " << rle_fval; } //calculate missing value for each partition { 
TIMED_SCOPE(timerObj, "calculate missing value"); SyncArray<int> pid_ptr(n_partition + 1); counting_iterator<int> search_begin(0); upper_bound(cuda::par, rle_pid.device_data(), rle_pid.device_end(), search_begin, search_begin + n_partition, pid_ptr.device_data() + 1); LOG(DEBUG) << "pid_ptr = " << pid_ptr; auto pid_ptr_data = pid_ptr.device_data(); auto rle_pid_data = rle_pid.device_data(); auto rle_fval_data = rle_fval.device_data(); float_type rt_eps = this->rt_eps; device_loop(n_split, [=]__device__(int i) { int pid = rle_pid_data[i]; if (pid == INT_MAX) return; float_type f = rle_fval_data[i]; if ((pid_ptr_data[pid + 1] - 1) == i)//the last RLE rle_fval_data[i] = (f - fabsf(rle_fval_data[pid_ptr_data[pid]]) - rt_eps); else //FIXME read/write collision rle_fval_data[i] = (f + rle_fval_data[i + 1]) * 0.5f; }); const auto gh_prefix_sum_data = gh_prefix_sum.device_data(); const auto node_data = tree.nodes.device_data(); auto missing_gh_data = missing_gh.device_data(); device_loop(n_partition, [=]__device__(int pid) { int nid = pid / n_column + nid_offset; if (pid_ptr_data[pid + 1] != pid_ptr_data[pid]) missing_gh_data[pid] = node_data[nid].sum_gh_pair - gh_prefix_sum_data[pid_ptr_data[pid + 1] - 1]; }); // LOG(DEBUG) << "missing gh = " << missing_gh; } } //calculate gain of each split SyncArray<float_type> gain(n_split); SyncArray<bool> default_right(n_split); { TIMED_SCOPE(timerObj, "calculate gain"); auto compute_gain = []__device__(GHPair father, GHPair lch, GHPair rch, float_type min_child_weight, float_type lambda) -> float_type { if (lch.h >= min_child_weight && rch.h >= min_child_weight) return (lch.g * lch.g) / (lch.h + lambda) + (rch.g * rch.g) / (rch.h + lambda) - (father.g * father.g) / (father.h + lambda); else return 0; }; int *fvid2pid_data = fvid2pid.device_data(); const Tree::TreeNode *nodes_data = tree.nodes.device_data(); GHPair *gh_prefix_sum_data = gh_prefix_sum.device_data(); float_type *gain_data = gain.device_data(); bool *default_right_data 
= default_right.device_data(); const auto rle_pid_data = rle_pid.device_data(); const auto missing_gh_data = missing_gh.device_data(); auto rle_fval_data = rle_fval.device_data(); //for lambda expression float_type mcw = min_child_weight; float_type l = lambda; device_loop(n_split, [=]__device__(int i) { int pid = rle_pid_data[i]; int nid0 = pid / n_column; int nid = nid0 + nid_offset; if (pid == INT_MAX) return; GHPair father_gh = nodes_data[nid].sum_gh_pair; GHPair p_missing_gh = missing_gh_data[pid]; GHPair rch_gh = gh_prefix_sum_data[i]; float_type max_gain = compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l); if (p_missing_gh.h > 1) { rch_gh = rch_gh + p_missing_gh; float_type temp_gain = compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l); if (temp_gain > 0 && temp_gain - max_gain > 0.1) { max_gain = temp_gain; default_right_data[i] = true; } } gain_data[i] = max_gain; }); LOG(DEBUG) << "gain = " << gain; } //get best gain and the index of best gain for each feature and each node SyncArray<int_float> best_idx_gain(n_max_nodes_in_level); int n_nodes_in_level; { TIMED_SCOPE(timerObj, "get best gain"); auto arg_max = []__device__(const int_float &a, const int_float &b) { if (get<1>(a) == get<1>(b)) return get<0>(a) < get<0>(b) ? a : b; else return get<1>(a) > get<1>(b) ? 
a : b; }; auto in_same_node = [=]__device__(const int a, const int b) { return (a / n_column) == (b / n_column); }; //reduce to get best split of each node for this feature SyncArray<int> key_test(n_max_nodes_in_level); n_nodes_in_level = reduce_by_key( cuda::par, rle_pid.device_data(), rle_pid.device_end(), make_zip_iterator(make_tuple(counting_iterator<int>(0), gain.device_data())), key_test.device_data(),//make_discard_iterator(), best_idx_gain.device_data(), in_same_node, arg_max).second - best_idx_gain.device_data(); LOG(DEBUG) << "#nodes in level = " << n_nodes_in_level; LOG(DEBUG) << "best pid = " << key_test; LOG(DEBUG) << "best idx & gain = " << best_idx_gain; } //get split points const int_float *best_idx_gain_data = best_idx_gain.device_data(); const auto rle_pid_data = rle_pid.device_data(); GHPair *gh_prefix_sum_data = gh_prefix_sum.device_data(); const auto rle_fval_data = rle_fval.device_data(); const auto missing_gh_data = missing_gh.device_data(); bool *default_right_data = default_right.device_data(); sp.resize(n_nodes_in_level); auto sp_data = sp.device_data(); int column_offset = columns.column_offset; device_loop(n_nodes_in_level, [=]__device__(int i) { int_float bst = best_idx_gain_data[i]; float_type best_split_gain = get<1>(bst); int split_index = get<0>(bst); int pid = rle_pid_data[split_index]; sp_data[i].split_fea_id = (pid == INT_MAX) ? -1 : (pid % n_column) + column_offset; sp_data[i].nid = (pid == INT_MAX) ? 
-1 : (pid / n_column + nid_offset); sp_data[i].gain = best_split_gain; if (pid != INT_MAX) {//avoid split_index out of bound sp_data[i].fval = rle_fval_data[split_index]; sp_data[i].fea_missing_gh = missing_gh_data[pid]; sp_data[i].default_right = default_right_data[split_index]; sp_data[i].rch_sum_gh = gh_prefix_sum_data[split_index]; } }); } LOG(DEBUG) << "split points (gain/fea_id/nid): " << sp; } void ExactUpdater::update_tree(Tree &tree, const SyncArray<SplitPoint> &sp) { auto sp_data = sp.device_data(); int n_nodes_in_level = sp.size(); Tree::TreeNode *nodes_data = tree.nodes.device_data(); float_type rt_eps = this->rt_eps; float_type lambda = this->lambda; device_loop(n_nodes_in_level, [=]__device__(int i) { float_type best_split_gain = sp_data[i].gain; if (best_split_gain > rt_eps) { //do split if (sp_data[i].nid == -1) return; int nid = sp_data[i].nid; Tree::TreeNode &node = nodes_data[nid]; node.gain = best_split_gain; Tree::TreeNode &lch = nodes_data[node.lch_index];//left child Tree::TreeNode &rch = nodes_data[node.rch_index];//right child lch.is_valid = true; rch.is_valid = true; node.split_feature_id = sp_data[i].split_fea_id; GHPair p_missing_gh = sp_data[i].fea_missing_gh; //todo process begin node.split_value = sp_data[i].fval; rch.sum_gh_pair = sp_data[i].rch_sum_gh; if (sp_data[i].default_right) { rch.sum_gh_pair = rch.sum_gh_pair + p_missing_gh; node.default_right = true; } lch.sum_gh_pair = node.sum_gh_pair - rch.sum_gh_pair; lch.calc_weight(lambda); rch.calc_weight(lambda); } else { //set leaf if (sp_data[i].nid == -1) return; int nid = sp_data[i].nid; Tree::TreeNode &node = nodes_data[nid]; node.is_leaf = true; nodes_data[node.lch_index].is_valid = false; nodes_data[node.rch_index].is_valid = false; } // } }); } bool ExactUpdater::reset_ins2node_id(InsStat &stats, const Tree &tree, const SparseColumns &columns) { SyncArray<bool> has_splittable(1); //set new node id for each instance { TIMED_SCOPE(timerObj, "get new node id"); int *nid_data = 
stats.nid.device_data(); const int *iid_data = columns.csc_row_ind.device_data(); const Tree::TreeNode *nodes_data = tree.nodes.device_data(); const int *col_ptr_data = columns.csc_col_ptr.device_data(); const float_type *f_val_data = columns.csc_val.device_data(); has_splittable.host_data()[0] = false; bool *h_s_data = has_splittable.device_data(); int column_offset = columns.column_offset; int n_column = columns.n_column; int nnz = columns.nnz; int n_block = ::min((nnz / n_column - 1) / 256 + 1, 32 * 56); LOG(TRACE) << "update ins2node id for each fval"; device_loop_2d(n_column, col_ptr_data, [=]__device__(int col_id, int fvid) { //feature value id -> instance id int iid = iid_data[fvid]; //instance id -> node id int nid = nid_data[iid]; //node id -> node const Tree::TreeNode &node = nodes_data[nid]; //if the node splits on this feature if (node.splittable() && node.split_feature_id == col_id + column_offset) { h_s_data[0] = true; if (f_val_data[fvid] < node.split_value) //goes to left child nid_data[iid] = node.lch_index; else //right child nid_data[iid] = node.rch_index; } }, n_block); } LOG(DEBUG) << "new tree_id = " << stats.nid; // LOG(DEBUG) << v_trees_gpu[cur_device_id].nodes; return has_splittable.host_data()[0]; } std::ostream &operator<<(std::ostream &os, const int_float &rhs) { os << string_format("%d/%f", thrust::get<0>(rhs), thrust::get<1>(rhs)); return os; }
ef380ed5bb40936f3adfcc5ff2c5dfad88b95a3d.cu
// // Created by shijiashuai on 5/7/18. // #include "thundergbm/updater/exact_updater.h" void ExactUpdater::lsh_hash_init(unsigned n_bucket, unsigned num_table, unsigned num_dimension, unsigned p_norm, float r, unsigned numP, int seed){ lsh_table.param.n_bucket = n_bucket; lsh_table.param.n_table = num_table; lsh_table.param.n_dimension = num_dimension; lsh_table.param.p_norm = p_norm; lsh_table.param.r = r; lsh_table.param.n_comp = numP; lsh_table.param.seed = seed; lsh_table.init(); } void ExactUpdater::grow(Tree &tree, const vector<std::shared_ptr<SparseColumns>> &v_columns, InsStat &stats) { TIMED_SCOPE(timerObj, "grow tree"); int n_instances = stats.n_instances; int cur_device = 0; // int cur_device = param.use_gpu_id; LOG(TRACE) << "broadcast tree and stats"; v_stats.resize(n_devices); v_trees_gpu.resize(n_devices); init_tree(tree, stats); DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { //stats int n_instances = stats.n_instances; v_stats[device_id].reset(new InsStat()); InsStat &gpu_stats = *v_stats[device_id]; gpu_stats.resize(n_instances); gpu_stats.gh_pair.copy_from(stats.gh_pair.host_data(), n_instances); gpu_stats.nid.copy_from(stats.nid.host_data(), n_instances); gpu_stats.y.copy_from(stats.y.host_data(), n_instances); gpu_stats.y_predict.copy_from(stats.y_predict.host_data(), n_instances); //tree v_trees_gpu[device_id].reset(new Tree()); Tree &gpu_tree = *v_trees_gpu[device_id]; gpu_tree.nodes.resize(tree.nodes.size()); gpu_tree.nodes.copy_from(tree.nodes.host_data(), tree.nodes.size()); }); for (int i = 0; i < depth; ++i) { LOG(TRACE) << "growing tree at depth " << i; vector<SyncArray<SplitPoint>> local_sp(n_devices); { TIMED_SCOPE(timerObj, "find split"); DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { LOG(TRACE) << string_format("finding split on device %d", device_id); find_split(i, *v_columns[device_id], *v_trees_gpu[device_id], *v_stats[device_id], local_sp[device_id]); }); } int n_max_nodes_in_level = 1 << i;//2^i int nid_offset = (1 << 
i) - 1;//2^i - 1 SyncArray<SplitPoint> global_sp(n_max_nodes_in_level); { TIMED_SCOPE(timerObj, "split point all reduce"); split_point_all_reduce(local_sp, global_sp, i); } //do split { TIMED_SCOPE(timerObj, "update tree"); update_tree(tree, global_sp); } //broadcast tree LOG(TRACE) << "broadcasting updated tree"; DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { v_trees_gpu[device_id]->nodes.copy_from(tree.nodes.host_data(), tree.nodes.size()); }); { vector<bool> v_has_split(n_devices); TIMED_SCOPE(timerObj, "reset ins2node id"); LOG(TRACE) << "reset ins2node id"; DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { v_has_split[device_id] = reset_ins2node_id(*v_stats[device_id], *v_trees_gpu[device_id], *v_columns[device_id]); }); LOG(TRACE) << "gathering ins2node id"; //get final result of the reset instance id to node id bool has_split = false; for (int d = 0; d < n_devices; d++) { has_split |= v_has_split[d]; } if (!has_split) { LOG(INFO) << "no splittable nodes, stop"; break; } } //get global ins2node id { TIMED_SCOPE(timerObj, "global ins2node id"); SyncArray<int> local_ins2node_id(n_instances); auto local_ins2node_id_data = local_ins2node_id.device_data(); auto global_ins2node_id_data = stats.nid.device_data(); for (int d = 0; d < n_devices; d++) { CUDA_CHECK(cudaMemcpyPeerAsync(local_ins2node_id_data, cur_device, v_stats[d]->nid.device_data(), d, sizeof(int) * n_instances)); cudaDeviceSynchronize(); device_loop(n_instances, [=]__device__(int i) { global_ins2node_id_data[i] = (global_ins2node_id_data[i] > local_ins2node_id_data[i]) ? 
global_ins2node_id_data[i] : local_ins2node_id_data[i]; }); } } //processing missing value { TIMED_SCOPE(timerObj, "process missing value"); LOG(TRACE) << "update ins2node id for each missing fval"; auto global_ins2node_id_data = stats.nid.device_data();//essential auto nodes_data = v_trees_gpu[0]->nodes.device_data();//already broadcast above device_loop(n_instances, [=]__device__(int iid) { int nid = global_ins2node_id_data[iid]; //if the instance is not on leaf node and not goes down if (nodes_data[nid].splittable() && nid < nid_offset + n_max_nodes_in_level) { //let the instance goes down const Tree::TreeNode &node = nodes_data[nid]; if (node.default_right) global_ins2node_id_data[iid] = node.rch_index; else global_ins2node_id_data[iid] = node.lch_index; } }); LOG(DEBUG) << "new nid = " << stats.nid; //broadcast ins2node id DO_ON_MULTI_DEVICES(n_devices, [&](int device_id) { v_stats[device_id]->nid.copy_from(stats.nid.host_data(), stats.nid.size()); }); } } tree.nodes.copy_from(v_trees_gpu[0]->nodes); } void ExactUpdater::split_point_all_reduce(const vector<SyncArray<SplitPoint>> &local_sp, SyncArray<SplitPoint> &global_sp, int depth) { //get global best split of each node int n_max_nodes_in_level = 1 << depth;//2^i int nid_offset = (1 << depth) - 1;//2^i - 1 auto global_sp_data = global_sp.host_data(); vector<bool> active_sp(n_max_nodes_in_level); for (int n = 0; n < n_max_nodes_in_level; n++) { global_sp_data[n].nid = n + nid_offset; global_sp_data[n].gain = -1.0f; active_sp[n] = false; } for (int device_id = 0; device_id < n_devices; device_id++) { auto local_sp_data = local_sp[device_id].host_data(); for (int j = 0; j < local_sp[device_id].size(); j++) { int sp_nid = local_sp_data[j].nid; if (sp_nid == -1) continue; int global_pos = sp_nid - nid_offset; if (!active_sp[global_pos]) global_sp_data[global_pos] = local_sp_data[j]; else global_sp_data[global_pos] = (global_sp_data[global_pos].gain >= local_sp_data[j].gain) ? 
global_sp_data[global_pos] : local_sp_data[j]; active_sp[global_pos] = true; } } //set inactive sp for (int n = 0; n < n_max_nodes_in_level; n++) { if (!active_sp[n]) global_sp_data[n].nid = -1; } LOG(DEBUG) << "global best split point = " << global_sp; } void ExactUpdater::init_tree(Tree &tree, const InsStat &stats) { tree.init(depth); //init root node Tree::TreeNode &root_node = tree.nodes.host_data()[0]; root_node.sum_gh_pair = stats.sum_gh; root_node.is_valid = true; root_node.calc_weight(lambda); LOG(DEBUG) << "root sum gh " << root_node.sum_gh_pair; } void ExactUpdater::find_split(int level, const SparseColumns &columns, const Tree &tree, const InsStat &stats, SyncArray<SplitPoint> &sp) { int n_max_nodes_in_level = static_cast<int>(pow(2, level)); int nid_offset = static_cast<int>(pow(2, level) - 1); int n_column = columns.n_column; int n_partition = n_column * n_max_nodes_in_level; int nnz = columns.nnz; int n_block = std::min((nnz / n_column - 1) / 256 + 1, 32 * 56); LOG(TRACE) << "start finding split"; //find the best split locally { using namespace thrust; SyncArray<int> fvid2pid(nnz); { TIMED_SCOPE(timerObj, "fvid2pid"); //input const int *nid_data = stats.nid.device_data(); const int *iid_data = columns.csc_row_ind.device_data(); LOG(TRACE) << "after using v_stats and columns"; //output int *fvid2pid_data = fvid2pid.device_data(); device_loop_2d( n_column, columns.csc_col_ptr.device_data(), [=]__device__(int col_id, int fvid) { //feature value id -> instance id -> node id int nid = nid_data[iid_data[fvid]]; int pid; //if this node is leaf node, move it to the end if (nid < nid_offset) pid = INT_MAX;//todo negative else pid = (nid - nid_offset) * n_column + col_id; fvid2pid_data[fvid] = pid; }, n_block); LOG(DEBUG) << "fvid2pid " << fvid2pid; } //gather g/h pairs and do prefix sum int n_split; SyncArray<GHPair> gh_prefix_sum; SyncArray<GHPair> missing_gh(n_partition); SyncArray<int> rle_pid; SyncArray<float_type> rle_fval; { //get feature value id 
mapping for partition, new -> old SyncArray<int> fvid_new2old(nnz); { TIMED_SCOPE(timerObj, "fvid_new2old"); sequence(cuda::par, fvid_new2old.device_data(), fvid_new2old.device_end(), 0); stable_sort_by_key( cuda::par, fvid2pid.device_data(), fvid2pid.device_end(), fvid_new2old.device_data(), thrust::less<int>()); LOG(DEBUG) << "sorted fvid2pid " << fvid2pid; LOG(DEBUG) << "fvid_new2old " << fvid_new2old; } //do prefix sum { TIMED_SCOPE(timerObj, "do prefix sum"); SyncArray<GHPair> rle_gh(nnz); SyncArray<int_float> rle_key(nnz); //same feature value in the same part has the same key. auto key_iter = make_zip_iterator( make_tuple( fvid2pid.device_data(), make_permutation_iterator( columns.csc_val.device_data(), fvid_new2old.device_data())));//use fvid_new2old to access csc_val //apply RLE compression n_split = reduce_by_key( cuda::par, key_iter, key_iter + nnz, make_permutation_iterator( //ins id -> gh pair stats.gh_pair.device_data(), make_permutation_iterator( //old fvid -> ins id columns.csc_row_ind.device_data(), fvid_new2old.device_data())), //new fvid -> old fvid rle_key.device_data(), rle_gh.device_data() ).first - rle_key.device_data(); gh_prefix_sum.resize(n_split); rle_pid.resize(n_split); rle_fval.resize(n_split); const auto rle_gh_data = rle_gh.device_data(); const auto rle_key_data = rle_key.device_data(); auto gh_prefix_sum_data = gh_prefix_sum.device_data(); auto rle_pid_data = rle_pid.device_data(); auto rle_fval_data = rle_fval.device_data(); device_loop(n_split, [=]__device__(int i) { gh_prefix_sum_data[i] = rle_gh_data[i]; rle_pid_data[i] = get<0>(rle_key_data[i]); rle_fval_data[i] = get<1>(rle_key_data[i]); }); inclusive_scan_by_key( cuda::par, rle_pid.device_data(), rle_pid.device_end(), gh_prefix_sum.device_data(), gh_prefix_sum.device_data()); // LOG(DEBUG) << "gh prefix sum = " << gh_prefix_sum; LOG(DEBUG) << "reduced pid = " << rle_pid; LOG(DEBUG) << "reduced fval = " << rle_fval; } //calculate missing value for each partition { 
TIMED_SCOPE(timerObj, "calculate missing value"); SyncArray<int> pid_ptr(n_partition + 1); counting_iterator<int> search_begin(0); upper_bound(cuda::par, rle_pid.device_data(), rle_pid.device_end(), search_begin, search_begin + n_partition, pid_ptr.device_data() + 1); LOG(DEBUG) << "pid_ptr = " << pid_ptr; auto pid_ptr_data = pid_ptr.device_data(); auto rle_pid_data = rle_pid.device_data(); auto rle_fval_data = rle_fval.device_data(); float_type rt_eps = this->rt_eps; device_loop(n_split, [=]__device__(int i) { int pid = rle_pid_data[i]; if (pid == INT_MAX) return; float_type f = rle_fval_data[i]; if ((pid_ptr_data[pid + 1] - 1) == i)//the last RLE rle_fval_data[i] = (f - fabsf(rle_fval_data[pid_ptr_data[pid]]) - rt_eps); else //FIXME read/write collision rle_fval_data[i] = (f + rle_fval_data[i + 1]) * 0.5f; }); const auto gh_prefix_sum_data = gh_prefix_sum.device_data(); const auto node_data = tree.nodes.device_data(); auto missing_gh_data = missing_gh.device_data(); device_loop(n_partition, [=]__device__(int pid) { int nid = pid / n_column + nid_offset; if (pid_ptr_data[pid + 1] != pid_ptr_data[pid]) missing_gh_data[pid] = node_data[nid].sum_gh_pair - gh_prefix_sum_data[pid_ptr_data[pid + 1] - 1]; }); // LOG(DEBUG) << "missing gh = " << missing_gh; } } //calculate gain of each split SyncArray<float_type> gain(n_split); SyncArray<bool> default_right(n_split); { TIMED_SCOPE(timerObj, "calculate gain"); auto compute_gain = []__device__(GHPair father, GHPair lch, GHPair rch, float_type min_child_weight, float_type lambda) -> float_type { if (lch.h >= min_child_weight && rch.h >= min_child_weight) return (lch.g * lch.g) / (lch.h + lambda) + (rch.g * rch.g) / (rch.h + lambda) - (father.g * father.g) / (father.h + lambda); else return 0; }; int *fvid2pid_data = fvid2pid.device_data(); const Tree::TreeNode *nodes_data = tree.nodes.device_data(); GHPair *gh_prefix_sum_data = gh_prefix_sum.device_data(); float_type *gain_data = gain.device_data(); bool *default_right_data 
= default_right.device_data(); const auto rle_pid_data = rle_pid.device_data(); const auto missing_gh_data = missing_gh.device_data(); auto rle_fval_data = rle_fval.device_data(); //for lambda expression float_type mcw = min_child_weight; float_type l = lambda; device_loop(n_split, [=]__device__(int i) { int pid = rle_pid_data[i]; int nid0 = pid / n_column; int nid = nid0 + nid_offset; if (pid == INT_MAX) return; GHPair father_gh = nodes_data[nid].sum_gh_pair; GHPair p_missing_gh = missing_gh_data[pid]; GHPair rch_gh = gh_prefix_sum_data[i]; float_type max_gain = compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l); if (p_missing_gh.h > 1) { rch_gh = rch_gh + p_missing_gh; float_type temp_gain = compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l); if (temp_gain > 0 && temp_gain - max_gain > 0.1) { max_gain = temp_gain; default_right_data[i] = true; } } gain_data[i] = max_gain; }); LOG(DEBUG) << "gain = " << gain; } //get best gain and the index of best gain for each feature and each node SyncArray<int_float> best_idx_gain(n_max_nodes_in_level); int n_nodes_in_level; { TIMED_SCOPE(timerObj, "get best gain"); auto arg_max = []__device__(const int_float &a, const int_float &b) { if (get<1>(a) == get<1>(b)) return get<0>(a) < get<0>(b) ? a : b; else return get<1>(a) > get<1>(b) ? 
a : b; }; auto in_same_node = [=]__device__(const int a, const int b) { return (a / n_column) == (b / n_column); }; //reduce to get best split of each node for this feature SyncArray<int> key_test(n_max_nodes_in_level); n_nodes_in_level = reduce_by_key( cuda::par, rle_pid.device_data(), rle_pid.device_end(), make_zip_iterator(make_tuple(counting_iterator<int>(0), gain.device_data())), key_test.device_data(),//make_discard_iterator(), best_idx_gain.device_data(), in_same_node, arg_max).second - best_idx_gain.device_data(); LOG(DEBUG) << "#nodes in level = " << n_nodes_in_level; LOG(DEBUG) << "best pid = " << key_test; LOG(DEBUG) << "best idx & gain = " << best_idx_gain; } //get split points const int_float *best_idx_gain_data = best_idx_gain.device_data(); const auto rle_pid_data = rle_pid.device_data(); GHPair *gh_prefix_sum_data = gh_prefix_sum.device_data(); const auto rle_fval_data = rle_fval.device_data(); const auto missing_gh_data = missing_gh.device_data(); bool *default_right_data = default_right.device_data(); sp.resize(n_nodes_in_level); auto sp_data = sp.device_data(); int column_offset = columns.column_offset; device_loop(n_nodes_in_level, [=]__device__(int i) { int_float bst = best_idx_gain_data[i]; float_type best_split_gain = get<1>(bst); int split_index = get<0>(bst); int pid = rle_pid_data[split_index]; sp_data[i].split_fea_id = (pid == INT_MAX) ? -1 : (pid % n_column) + column_offset; sp_data[i].nid = (pid == INT_MAX) ? 
-1 : (pid / n_column + nid_offset); sp_data[i].gain = best_split_gain; if (pid != INT_MAX) {//avoid split_index out of bound sp_data[i].fval = rle_fval_data[split_index]; sp_data[i].fea_missing_gh = missing_gh_data[pid]; sp_data[i].default_right = default_right_data[split_index]; sp_data[i].rch_sum_gh = gh_prefix_sum_data[split_index]; } }); } LOG(DEBUG) << "split points (gain/fea_id/nid): " << sp; } void ExactUpdater::update_tree(Tree &tree, const SyncArray<SplitPoint> &sp) { auto sp_data = sp.device_data(); int n_nodes_in_level = sp.size(); Tree::TreeNode *nodes_data = tree.nodes.device_data(); float_type rt_eps = this->rt_eps; float_type lambda = this->lambda; device_loop(n_nodes_in_level, [=]__device__(int i) { float_type best_split_gain = sp_data[i].gain; if (best_split_gain > rt_eps) { //do split if (sp_data[i].nid == -1) return; int nid = sp_data[i].nid; Tree::TreeNode &node = nodes_data[nid]; node.gain = best_split_gain; Tree::TreeNode &lch = nodes_data[node.lch_index];//left child Tree::TreeNode &rch = nodes_data[node.rch_index];//right child lch.is_valid = true; rch.is_valid = true; node.split_feature_id = sp_data[i].split_fea_id; GHPair p_missing_gh = sp_data[i].fea_missing_gh; //todo process begin node.split_value = sp_data[i].fval; rch.sum_gh_pair = sp_data[i].rch_sum_gh; if (sp_data[i].default_right) { rch.sum_gh_pair = rch.sum_gh_pair + p_missing_gh; node.default_right = true; } lch.sum_gh_pair = node.sum_gh_pair - rch.sum_gh_pair; lch.calc_weight(lambda); rch.calc_weight(lambda); } else { //set leaf if (sp_data[i].nid == -1) return; int nid = sp_data[i].nid; Tree::TreeNode &node = nodes_data[nid]; node.is_leaf = true; nodes_data[node.lch_index].is_valid = false; nodes_data[node.rch_index].is_valid = false; } // } }); } bool ExactUpdater::reset_ins2node_id(InsStat &stats, const Tree &tree, const SparseColumns &columns) { SyncArray<bool> has_splittable(1); //set new node id for each instance { TIMED_SCOPE(timerObj, "get new node id"); int *nid_data = 
stats.nid.device_data(); const int *iid_data = columns.csc_row_ind.device_data(); const Tree::TreeNode *nodes_data = tree.nodes.device_data(); const int *col_ptr_data = columns.csc_col_ptr.device_data(); const float_type *f_val_data = columns.csc_val.device_data(); has_splittable.host_data()[0] = false; bool *h_s_data = has_splittable.device_data(); int column_offset = columns.column_offset; int n_column = columns.n_column; int nnz = columns.nnz; int n_block = std::min((nnz / n_column - 1) / 256 + 1, 32 * 56); LOG(TRACE) << "update ins2node id for each fval"; device_loop_2d(n_column, col_ptr_data, [=]__device__(int col_id, int fvid) { //feature value id -> instance id int iid = iid_data[fvid]; //instance id -> node id int nid = nid_data[iid]; //node id -> node const Tree::TreeNode &node = nodes_data[nid]; //if the node splits on this feature if (node.splittable() && node.split_feature_id == col_id + column_offset) { h_s_data[0] = true; if (f_val_data[fvid] < node.split_value) //goes to left child nid_data[iid] = node.lch_index; else //right child nid_data[iid] = node.rch_index; } }, n_block); } LOG(DEBUG) << "new tree_id = " << stats.nid; // LOG(DEBUG) << v_trees_gpu[cur_device_id].nodes; return has_splittable.host_data()[0]; } std::ostream &operator<<(std::ostream &os, const int_float &rhs) { os << string_format("%d/%f", thrust::get<0>(rhs), thrust::get<1>(rhs)); return os; }
TriangularOps.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/Context.h> #include <ATen/hip/HIPContext.h> #include <ATen/Dispatch.h> #include <ATen/MemoryOverlap.h> #include <ATen/NativeFunctions.h> #include <ATen/native/Resize.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> namespace at { namespace native { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename IndexType, bool upper> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void triu_tril_kernel( cuda::detail::TensorInfo<scalar_t, IndexType> result_info, const cuda::detail::TensorInfo<scalar_t, IndexType> self_info, const int64_t k, const int64_t N) { int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x; if (linear_idx >= N) { return; } auto dims = self_info.dims; IndexType self_offset = 0, result_offset = 0; // Compute column index and corresponding offset IndexType col = linear_idx % self_info.sizes[dims - 1]; linear_idx /= self_info.sizes[dims - 1]; self_offset += self_info.strides[dims - 1] * col; result_offset += result_info.strides[dims - 1] * col; // Compute row index and corresponding offset IndexType row = linear_idx % self_info.sizes[dims - 2]; linear_idx /= self_info.sizes[dims - 2]; self_offset += self_info.strides[dims - 2] * row; result_offset += result_info.strides[dims - 2] * row; // Compute remaining offsets IndexType running_index; #pragma unroll for (IndexType i = dims - 3; i >= 0; --i) { running_index = linear_idx % self_info.sizes[i]; linear_idx /= self_info.sizes[i]; self_offset += running_index * self_info.strides[i]; result_offset += running_index * result_info.strides[i]; } bool mask = upper ? (col - row >= k) : (col - row <= k); result_info.data[result_offset] = mask ? 
self_info.data[self_offset] : scalar_t(0); } template <bool upper> Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) { int64_t N = self.numel(); dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid((N + dim_block.x - 1) / dim_block.x); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), "triu_tril_cuda_template", [&]{ if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) { auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result); auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self); hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int32_t, upper>) , dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), result_info, self_info, k, N); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result); auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self); hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int64_t, upper>) , dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), result_info, self_info, k, N); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); return result; } Tensor& tril_cuda_(Tensor &self, int64_t k) { return tril_cuda_out(self, k, self); } Tensor& tril_cuda_out(const Tensor& self, int64_t k, Tensor &result) { if (result.sizes() != self.sizes()) { result.resize_as_(self); } if (self.numel() == 0) { return result; } return triu_tril_cuda_template<false>(result, self, k, "tril"); } Tensor& triu_cuda_(Tensor &self, int64_t k) { return triu_cuda_out(self, k, self); } Tensor& triu_cuda_out(const Tensor& self, int64_t k, Tensor &result) { if (result.sizes() != self.sizes()) { result.resize_as_(self); } if (self.numel() == 0) { return result; } return triu_tril_cuda_template<true>(result, self, k, "triu"); } // Copy the kth diagonal of a matrix B to a vector A. 
template <typename scalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void copy_from_diagonal_kernel( scalar_t* a, scalar_t* b, std::ptrdiff_t start, std::ptrdiff_t size, std::ptrdiff_t strideSum, std::ptrdiff_t strideA) { for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < size; linearIndex += gridDim.x * blockDim.x) { const std::ptrdiff_t bOffset = start + strideSum * linearIndex; a[strideA * linearIndex] = b[bOffset]; } } // Copy vector B to the kth diagonal of a matrix A template <typename scalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void copy_to_diagonal_kernel( scalar_t* a, scalar_t* b, std::ptrdiff_t start, std::ptrdiff_t size, std::ptrdiff_t strideSum, std::ptrdiff_t strideB) { for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < size; linearIndex += gridDim.x * blockDim.x) { const std::ptrdiff_t aOffset = start + strideSum * linearIndex; a[aOffset] = b[strideB * linearIndex]; } } template <typename scalar_t> Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) { TORCH_CHECK( self.dim() == 1 || self.dim() == 2, "matrix or a vector expected"); TensorArg result_arg{result, "result", 1}; TensorArg self_arg{self, "self", 2}; checkAllSameGPU("diag", {result_arg, self_arg}); checkSameType("diag", result_arg, self_arg); int nDimension = self.dim(); if (nDimension == 2) { auto self_stride_0 = self.stride(0); auto self_stride_1 = self.stride(1); int sz; if (dimension > 0) { sz = ::min(self.size(0), self.size(1) - dimension); } else { sz = ::min(self.size(0) + dimension, self.size(1)); } at::native::resize_output(result, {sz}); if (sz > 0) { at::assert_no_internal_overlap(result); auto result_stride = result.stride(0); const dim3 threads(::min( int(sz), int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock))); const dim3 grid( ::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x)))); auto start = (dimension >= 0 ? 
dimension * self_stride_1 : -dimension * self_stride_0); // Kernel Launch hipLaunchKernelGGL(( copy_from_diagonal_kernel<scalar_t>) , dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(), start, sz, self_stride_0 + self_stride_1, result_stride); C10_HIP_KERNEL_LAUNCH_CHECK(); } } else { auto n_elems = self.numel(); auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension; auto self_stride = self.stride(0); at::native::resize_output(result, {sz, sz}); result.zero_(); if (sz > 0) { at::assert_no_internal_overlap(result); auto result_stride_0 = result.stride(0); auto result_stride_1 = result.stride(1); const dim3 threads(::min( int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)); const dim3 grid( ::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x)))); auto start = (dimension >= 0 ? dimension * result_stride_1 : -dimension * result_stride_0); // Kernel Launch hipLaunchKernelGGL(( copy_to_diagonal_kernel<scalar_t>) , dim3(grid), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(), start, n_elems, result_stride_0 + result_stride_1, self_stride); C10_HIP_KERNEL_LAUNCH_CHECK(); } } return result; } Tensor& diag_cuda_out(const Tensor& self, int64_t dimension, Tensor& result) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(ScalarType::Half, ScalarType::Bool, self.scalar_type(), "diag_cuda", [&] { apply_diag<scalar_t>(result, self, dimension); }); return result; } Tensor trace_cuda(const Tensor& self) { TORCH_CHECK(self.dim() == 2, "expected a matrix"); int dimension = 0; auto result = at::diag(self, dimension); return result.sum(); } } // namespace native } // namespace at
TriangularOps.cu
#include <ATen/Context.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/Dispatch.h> #include <ATen/MemoryOverlap.h> #include <ATen/NativeFunctions.h> #include <ATen/native/Resize.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> namespace at { namespace native { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename IndexType, bool upper> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void triu_tril_kernel( cuda::detail::TensorInfo<scalar_t, IndexType> result_info, const cuda::detail::TensorInfo<scalar_t, IndexType> self_info, const int64_t k, const int64_t N) { int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x; if (linear_idx >= N) { return; } auto dims = self_info.dims; IndexType self_offset = 0, result_offset = 0; // Compute column index and corresponding offset IndexType col = linear_idx % self_info.sizes[dims - 1]; linear_idx /= self_info.sizes[dims - 1]; self_offset += self_info.strides[dims - 1] * col; result_offset += result_info.strides[dims - 1] * col; // Compute row index and corresponding offset IndexType row = linear_idx % self_info.sizes[dims - 2]; linear_idx /= self_info.sizes[dims - 2]; self_offset += self_info.strides[dims - 2] * row; result_offset += result_info.strides[dims - 2] * row; // Compute remaining offsets IndexType running_index; #pragma unroll for (IndexType i = dims - 3; i >= 0; --i) { running_index = linear_idx % self_info.sizes[i]; linear_idx /= self_info.sizes[i]; self_offset += running_index * self_info.strides[i]; result_offset += running_index * result_info.strides[i]; } bool mask = upper ? (col - row >= k) : (col - row <= k); result_info.data[result_offset] = mask ? 
self_info.data[self_offset] : scalar_t(0); } template <bool upper> Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) { int64_t N = self.numel(); dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid((N + dim_block.x - 1) / dim_block.x); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, self.scalar_type(), "triu_tril_cuda_template", [&]{ if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) { auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result); auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self); triu_tril_kernel<scalar_t, int32_t, upper> <<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( result_info, self_info, k, N); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result); auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self); triu_tril_kernel<scalar_t, int64_t, upper> <<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( result_info, self_info, k, N); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); return result; } Tensor& tril_cuda_(Tensor &self, int64_t k) { return tril_cuda_out(self, k, self); } Tensor& tril_cuda_out(const Tensor& self, int64_t k, Tensor &result) { if (result.sizes() != self.sizes()) { result.resize_as_(self); } if (self.numel() == 0) { return result; } return triu_tril_cuda_template<false>(result, self, k, "tril"); } Tensor& triu_cuda_(Tensor &self, int64_t k) { return triu_cuda_out(self, k, self); } Tensor& triu_cuda_out(const Tensor& self, int64_t k, Tensor &result) { if (result.sizes() != self.sizes()) { result.resize_as_(self); } if (self.numel() == 0) { return result; } return triu_tril_cuda_template<true>(result, self, k, "triu"); } // Copy the kth diagonal of a matrix B to a vector A. 
template <typename scalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void copy_from_diagonal_kernel( scalar_t* a, scalar_t* b, std::ptrdiff_t start, std::ptrdiff_t size, std::ptrdiff_t strideSum, std::ptrdiff_t strideA) { for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < size; linearIndex += gridDim.x * blockDim.x) { const std::ptrdiff_t bOffset = start + strideSum * linearIndex; a[strideA * linearIndex] = b[bOffset]; } } // Copy vector B to the kth diagonal of a matrix A template <typename scalar_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void copy_to_diagonal_kernel( scalar_t* a, scalar_t* b, std::ptrdiff_t start, std::ptrdiff_t size, std::ptrdiff_t strideSum, std::ptrdiff_t strideB) { for (std::ptrdiff_t linearIndex = blockIdx.x * blockDim.x + threadIdx.x; linearIndex < size; linearIndex += gridDim.x * blockDim.x) { const std::ptrdiff_t aOffset = start + strideSum * linearIndex; a[aOffset] = b[strideB * linearIndex]; } } template <typename scalar_t> Tensor& apply_diag(Tensor& result, const Tensor& self, int64_t dimension) { TORCH_CHECK( self.dim() == 1 || self.dim() == 2, "matrix or a vector expected"); TensorArg result_arg{result, "result", 1}; TensorArg self_arg{self, "self", 2}; checkAllSameGPU("diag", {result_arg, self_arg}); checkSameType("diag", result_arg, self_arg); int nDimension = self.dim(); if (nDimension == 2) { auto self_stride_0 = self.stride(0); auto self_stride_1 = self.stride(1); int sz; if (dimension > 0) { sz = std::min(self.size(0), self.size(1) - dimension); } else { sz = std::min(self.size(0) + dimension, self.size(1)); } at::native::resize_output(result, {sz}); if (sz > 0) { at::assert_no_internal_overlap(result); auto result_stride = result.stride(0); const dim3 threads(std::min( int(sz), int(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock))); const dim3 grid( std::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x)))); auto start = (dimension >= 0 ? 
dimension * self_stride_1 : -dimension * self_stride_0); // Kernel Launch copy_from_diagonal_kernel<scalar_t> <<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(), start, sz, self_stride_0 + self_stride_1, result_stride); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } else { auto n_elems = self.numel(); auto sz = (dimension > 0) ? n_elems + dimension : n_elems - dimension; auto self_stride = self.stride(0); at::native::resize_output(result, {sz, sz}); result.zero_(); if (sz > 0) { at::assert_no_internal_overlap(result); auto result_stride_0 = result.stride(0); auto result_stride_1 = result.stride(1); const dim3 threads(std::min( int(sz), at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock)); const dim3 grid( std::min(int(1024), cuda::ATenCeilDiv(int(sz), int(threads.x)))); auto start = (dimension >= 0 ? dimension * result_stride_1 : -dimension * result_stride_0); // Kernel Launch copy_to_diagonal_kernel<scalar_t> <<<grid, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(), start, n_elems, result_stride_0 + result_stride_1, self_stride); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } return result; } Tensor& diag_cuda_out(const Tensor& self, int64_t dimension, Tensor& result) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(ScalarType::Half, ScalarType::Bool, self.scalar_type(), "diag_cuda", [&] { apply_diag<scalar_t>(result, self, dimension); }); return result; } Tensor trace_cuda(const Tensor& self) { TORCH_CHECK(self.dim() == 2, "expected a matrix"); int dimension = 0; auto result = at::diag(self, dimension); return result.sum(); } } // namespace native } // namespace at
ef66b9f60c1b4dcf87b35e1bae8d343c6cbe977f.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <iostream> #include <assert.h> #include "DataFormats/DetId/interface/DetId.h" #include "DataFormats/HcalDetId/interface/HcalDetId.h" __global__ void test_gen_detid(DetId* id, uint32_t const rawid) { DetId did{rawid}; *id = did; } void test_detid() { // test det ids DetId h_id, h_id_test{100}; DetId h_test0{1}; DetId *d_id; hipMalloc((void**)&d_id, sizeof(DetId)); hipMemcpy(d_id, &h_id, sizeof(DetId), hipMemcpyHostToDevice); hipLaunchKernelGGL(( test_gen_detid), dim3(1),dim3(1), 0, 0, d_id, 100); hipMemcpy(&h_id, d_id, sizeof(DetId), hipMemcpyDeviceToHost); assert(h_id_test == h_id); assert(h_id != h_test0); } int main(int argc, char** argv) { int nDevices; hipGetDeviceCount(&nDevices); std::cout << "nDevices = " << nDevices << std::endl; // test det id functionality if (nDevices > 0) test_detid(); }
ef66b9f60c1b4dcf87b35e1bae8d343c6cbe977f.cu
#include <cuda_runtime.h> #include <cuda.h> #include <iostream> #include <assert.h> #include "DataFormats/DetId/interface/DetId.h" #include "DataFormats/HcalDetId/interface/HcalDetId.h" __global__ void test_gen_detid(DetId* id, uint32_t const rawid) { DetId did{rawid}; *id = did; } void test_detid() { // test det ids DetId h_id, h_id_test{100}; DetId h_test0{1}; DetId *d_id; cudaMalloc((void**)&d_id, sizeof(DetId)); cudaMemcpy(d_id, &h_id, sizeof(DetId), cudaMemcpyHostToDevice); test_gen_detid<<<1,1>>>(d_id, 100); cudaMemcpy(&h_id, d_id, sizeof(DetId), cudaMemcpyDeviceToHost); assert(h_id_test == h_id); assert(h_id != h_test0); } int main(int argc, char** argv) { int nDevices; cudaGetDeviceCount(&nDevices); std::cout << "nDevices = " << nDevices << std::endl; // test det id functionality if (nDevices > 0) test_detid(); }
4c12923e3f0f4208bcb1cf9be742f750e379b395.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "littleBinoticSort.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *arr = NULL; hipMalloc(&arr, XSIZE*YSIZE); int num = 1; int numMax = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( littleBinoticSort), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,num,numMax); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( littleBinoticSort), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,num,numMax); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( littleBinoticSort), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,num,numMax); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << 
endl; } }}
4c12923e3f0f4208bcb1cf9be742f750e379b395.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "littleBinoticSort.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *arr = NULL; cudaMalloc(&arr, XSIZE*YSIZE); int num = 1; int numMax = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); littleBinoticSort<<<gridBlock,threadBlock>>>(arr,num,numMax); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { littleBinoticSort<<<gridBlock,threadBlock>>>(arr,num,numMax); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { littleBinoticSort<<<gridBlock,threadBlock>>>(arr,num,numMax); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4126713445e8eb37590bf540a2bbb150c01385c5.hip
// !!! This is a file automatically generated by hipify!!! // List of splitting planes. // ------------------------------------------------------------------- // Copyright (C) 2010 OpenEngine.dk (See AUTHORS) // // This program is free software; It is covered by the GNU General // Public License version 2 or any later version. // See the GNU General Public License for more details (see LICENSE). //-------------------------------------------------------------------- #include <Utils/CUDA/SplittingPlanes.h> namespace OpenEngine { namespace Utils { namespace CUDA { SplittingPlanes::SplittingPlanes() : size(0) {} SplittingPlanes::SplittingPlanes(unsigned int i) : size(i) { hipMalloc(&triangleSetX, size * sizeof(int2)); hipMalloc(&triangleSetY, size * sizeof(int2)); hipMalloc(&triangleSetZ, size * sizeof(int2)); } void SplittingPlanes::Resize(unsigned int i){ unsigned int copySize = min(i, size); int2 *temp; hipMalloc(&temp, i * sizeof(int2)); hipMemcpy(temp, triangleSetX, copySize * sizeof(int2), hipMemcpyDeviceToDevice); hipFree(triangleSetX); triangleSetX = temp; CHECK_FOR_CUDA_ERROR(); hipMalloc(&temp, i * sizeof(int2)); hipMemcpy(temp, triangleSetY, copySize * sizeof(int2), hipMemcpyDeviceToDevice); hipFree(triangleSetY); triangleSetY = temp; CHECK_FOR_CUDA_ERROR(); hipMalloc(&temp, i * sizeof(int2)); hipMemcpy(temp, triangleSetZ, copySize * sizeof(int2), hipMemcpyDeviceToDevice); hipFree(triangleSetZ); triangleSetZ = temp; CHECK_FOR_CUDA_ERROR(); } } } }
4126713445e8eb37590bf540a2bbb150c01385c5.cu
// List of splitting planes. // ------------------------------------------------------------------- // Copyright (C) 2010 OpenEngine.dk (See AUTHORS) // // This program is free software; It is covered by the GNU General // Public License version 2 or any later version. // See the GNU General Public License for more details (see LICENSE). //-------------------------------------------------------------------- #include <Utils/CUDA/SplittingPlanes.h> namespace OpenEngine { namespace Utils { namespace CUDA { SplittingPlanes::SplittingPlanes() : size(0) {} SplittingPlanes::SplittingPlanes(unsigned int i) : size(i) { cudaMalloc(&triangleSetX, size * sizeof(int2)); cudaMalloc(&triangleSetY, size * sizeof(int2)); cudaMalloc(&triangleSetZ, size * sizeof(int2)); } void SplittingPlanes::Resize(unsigned int i){ unsigned int copySize = min(i, size); int2 *temp; cudaMalloc(&temp, i * sizeof(int2)); cudaMemcpy(temp, triangleSetX, copySize * sizeof(int2), cudaMemcpyDeviceToDevice); cudaFree(triangleSetX); triangleSetX = temp; CHECK_FOR_CUDA_ERROR(); cudaMalloc(&temp, i * sizeof(int2)); cudaMemcpy(temp, triangleSetY, copySize * sizeof(int2), cudaMemcpyDeviceToDevice); cudaFree(triangleSetY); triangleSetY = temp; CHECK_FOR_CUDA_ERROR(); cudaMalloc(&temp, i * sizeof(int2)); cudaMemcpy(temp, triangleSetZ, copySize * sizeof(int2), cudaMemcpyDeviceToDevice); cudaFree(triangleSetZ); triangleSetZ = temp; CHECK_FOR_CUDA_ERROR(); } } } }
50d0176f81c6df1a277d99adb47f4902597b3669.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "selection/kselection.h" #include "random/rng.h" #include <gtest/gtest.h> #include <limits> #include<stdlib.h> #include <algorithm> namespace MLCommon { namespace Selection { template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> __global__ void sortTestKernel(TypeK* key) { KVArray<TypeV,TypeK,N,Greater> arr; #pragma unroll for(int i=0;i<N;++i) { arr.arr[i].val = (TypeV)laneId(); arr.arr[i].key = (TypeK)laneId(); } warpFence(); arr.sort(); warpFence(); #pragma unroll for(int i=0;i<N;++i) arr.arr[i].store(nullptr, key+threadIdx.x+i*TPB); } template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> void sortTest(TypeK* key) { TypeK* dkey; CUDA_CHECK(hipMalloc((void**)&dkey, sizeof(TypeK)*TPB*N)); hipLaunchKernelGGL(( sortTestKernel<TypeV,TypeK,N,TPB,Greater>), dim3(1),dim3(TPB), 0, 0, dkey); CUDA_CHECK(hipPeekAtLastError()); updateHost<TypeK>(key, dkey, TPB*N); CUDA_CHECK(hipFree(dkey)); } /************************************************************************/ /********************** Add the function for CPU test *******************/ /************************************************************************/ template <typename TypeV,typename TypeK,bool Greater> int cmp(KVPair<TypeV,TypeK> a,KVPair<TypeV,TypeK> b) { if(Greater==0){ return a.val>b.val; }else{ 
return a.val<b.val; } } template <typename TypeV,typename TypeK, bool Greater> void partSortKVPair(KVPair<TypeV,TypeK> *arr,int N, int k){ std::partial_sort(arr,arr+k,arr+N,cmp<TypeV,TypeK,Greater>); } template <typename TypeV,typename TypeK,int N, bool Greater> void sortKVArray(KVArray<TypeV,TypeK,N,Greater> &arr){ std::sort(arr.arr,arr.arr+N,cmp<TypeV,TypeK,Greater>); } template <typename TypeV,typename TypeK, bool Greater> ::testing::AssertionResult checkResult(TypeV* d_arr, TypeV* d_outv, TypeK* d_outk, int rows,int N, int k,TypeV tolerance){ for(int rIndex=0; rIndex<rows;rIndex++){ //input data TypeV* h_arr = new TypeV[N]; updateHost(h_arr, d_arr+rIndex*N, N); KVPair<TypeV,TypeK>* topk=new KVPair<TypeV,TypeK>[N]; for(int j=0;j<N;j++){ topk[j].val=h_arr[j]; topk[j].key=j; } //result reference TypeV* h_outv = new TypeV[k]; updateHost(h_outv, d_outv+rIndex*k, k); TypeK* h_outk = new TypeK[k]; updateHost(h_outk, d_outk+rIndex*k, k); //calculate the result partSortKVPair<TypeV,TypeK,Greater>(topk,N,k); //check result for(int j=0;j<k;j++){ //std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu " // <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" " //<<h_outk[j] <<std::endl<<std::endl; if(abs(h_outv[j]-topk[j].val)>tolerance){ return ::testing::AssertionFailure() << "actual=" << topk[j].val << " != expected=" << h_outv[j]; } } //delete resource delete [] h_arr; delete [] h_outv; delete [] h_outk; delete [] topk; } return ::testing::AssertionSuccess(); } //Structure WarpTopKInputs template <typename T> struct WarpTopKInputs { T tolerance; int rows; //batch size int cols;// N the length of variables int k; // the top-k value unsigned long long int seed;//seed to generate data }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const WarpTopKInputs<T>& dims) { return os; } //Define functions WarpTopKTest template <typename T> class WarpTopKTest: public ::testing::TestWithParam<WarpTopKInputs<T> > { protected: void SetUp() override { params = 
::testing::TestWithParam<WarpTopKInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); allocate(arr, params.rows*params.cols); allocate(outk, params.rows*params.k); allocate(outv, params.rows*params.k); r.uniform(arr, params.rows*params.cols, T(-1.0), T(1.0)); static const bool Sort=false; static const bool Greater=true; warpTopK<T,int,Greater,Sort>(outv, outk, arr, params.k, params.rows, params.cols); } void TearDown() override { CUDA_CHECK(hipFree(outv)); CUDA_CHECK(hipFree(outk)); CUDA_CHECK(hipFree(arr)); } protected: WarpTopKInputs<T> params; T *arr, *outv; int *outk; }; //Parameters // Milestone 1: Verify the result of current implementation // Milestone 2: Support all the values of k between 1 and 1024; both inclusive // Milestone 2.1: Using the POC code to Support all the values const std::vector<WarpTopKInputs<float> > inputs2_0 = { {0.00000001, 2, 1024, 256,1234ULL} }; const std::vector<WarpTopKInputs<float> > inputs2_1 = { {0.00000001, 4, 2048, 1024,1234ULL} }; const std::vector<WarpTopKInputs<float> > inputs2_2 = { {0.00000001, 4, 2048, 1,1234ULL} }; // Milestone 2.2: Using the full thread queue and warp queue code to support all the values // @TODO: Milestone 3: Support not sorted // @TODO: Milestone 4: Support multi-gpu //Define the function TEST_P typedef WarpTopKTest<float> TestD2_0; typedef WarpTopKTest<float> TestD2_1; typedef WarpTopKTest<float> TestD2_2; TEST_P(TestD2_0, Result){ const static bool Greater=true; ASSERT_TRUE((checkResult<float, int,Greater>(arr, outv, outk, params.rows, params.cols,params.k,params.tolerance))); } TEST_P(TestD2_1, Result){ const static bool Greater=true; ASSERT_TRUE((checkResult<float, int,Greater>(arr, outv, outk, params.rows, params.cols,params.k,params.tolerance))); } TEST_P(TestD2_2, Result){ const static bool Greater=true; ASSERT_TRUE((checkResult<float, int,Greater>(arr, outv, outk, params.rows, params.cols,params.k,params.tolerance))); } // Instantiate INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0, 
::testing::ValuesIn(inputs2_0)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1, ::testing::ValuesIn(inputs2_1)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2, ::testing::ValuesIn(inputs2_2)); } // end namespace Selection } // end namespace MLCommon
50d0176f81c6df1a277d99adb47f4902597b3669.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "selection/kselection.h" #include "random/rng.h" #include <gtest/gtest.h> #include <limits> #include<stdlib.h> #include <algorithm> namespace MLCommon { namespace Selection { template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> __global__ void sortTestKernel(TypeK* key) { KVArray<TypeV,TypeK,N,Greater> arr; #pragma unroll for(int i=0;i<N;++i) { arr.arr[i].val = (TypeV)laneId(); arr.arr[i].key = (TypeK)laneId(); } warpFence(); arr.sort(); warpFence(); #pragma unroll for(int i=0;i<N;++i) arr.arr[i].store(nullptr, key+threadIdx.x+i*TPB); } template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> void sortTest(TypeK* key) { TypeK* dkey; CUDA_CHECK(cudaMalloc((void**)&dkey, sizeof(TypeK)*TPB*N)); sortTestKernel<TypeV,TypeK,N,TPB,Greater><<<1,TPB>>>(dkey); CUDA_CHECK(cudaPeekAtLastError()); updateHost<TypeK>(key, dkey, TPB*N); CUDA_CHECK(cudaFree(dkey)); } /************************************************************************/ /********************** Add the function for CPU test *******************/ /************************************************************************/ template <typename TypeV,typename TypeK,bool Greater> int cmp(KVPair<TypeV,TypeK> a,KVPair<TypeV,TypeK> b) { if(Greater==0){ return a.val>b.val; }else{ return a.val<b.val; } } template <typename TypeV,typename TypeK, bool Greater> void partSortKVPair(KVPair<TypeV,TypeK> 
*arr,int N, int k){ std::partial_sort(arr,arr+k,arr+N,cmp<TypeV,TypeK,Greater>); } template <typename TypeV,typename TypeK,int N, bool Greater> void sortKVArray(KVArray<TypeV,TypeK,N,Greater> &arr){ std::sort(arr.arr,arr.arr+N,cmp<TypeV,TypeK,Greater>); } template <typename TypeV,typename TypeK, bool Greater> ::testing::AssertionResult checkResult(TypeV* d_arr, TypeV* d_outv, TypeK* d_outk, int rows,int N, int k,TypeV tolerance){ for(int rIndex=0; rIndex<rows;rIndex++){ //input data TypeV* h_arr = new TypeV[N]; updateHost(h_arr, d_arr+rIndex*N, N); KVPair<TypeV,TypeK>* topk=new KVPair<TypeV,TypeK>[N]; for(int j=0;j<N;j++){ topk[j].val=h_arr[j]; topk[j].key=j; } //result reference TypeV* h_outv = new TypeV[k]; updateHost(h_outv, d_outv+rIndex*k, k); TypeK* h_outk = new TypeK[k]; updateHost(h_outk, d_outk+rIndex*k, k); //calculate the result partSortKVPair<TypeV,TypeK,Greater>(topk,N,k); //check result for(int j=0;j<k;j++){ //std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu " // <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" " //<<h_outk[j] <<std::endl<<std::endl; if(abs(h_outv[j]-topk[j].val)>tolerance){ return ::testing::AssertionFailure() << "actual=" << topk[j].val << " != expected=" << h_outv[j]; } } //delete resource delete [] h_arr; delete [] h_outv; delete [] h_outk; delete [] topk; } return ::testing::AssertionSuccess(); } //Structure WarpTopKInputs template <typename T> struct WarpTopKInputs { T tolerance; int rows; //batch size int cols;// N the length of variables int k; // the top-k value unsigned long long int seed;//seed to generate data }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const WarpTopKInputs<T>& dims) { return os; } //Define functions WarpTopKTest template <typename T> class WarpTopKTest: public ::testing::TestWithParam<WarpTopKInputs<T> > { protected: void SetUp() override { params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); allocate(arr, 
params.rows*params.cols); allocate(outk, params.rows*params.k); allocate(outv, params.rows*params.k); r.uniform(arr, params.rows*params.cols, T(-1.0), T(1.0)); static const bool Sort=false; static const bool Greater=true; warpTopK<T,int,Greater,Sort>(outv, outk, arr, params.k, params.rows, params.cols); } void TearDown() override { CUDA_CHECK(cudaFree(outv)); CUDA_CHECK(cudaFree(outk)); CUDA_CHECK(cudaFree(arr)); } protected: WarpTopKInputs<T> params; T *arr, *outv; int *outk; }; //Parameters // Milestone 1: Verify the result of current implementation // Milestone 2: Support all the values of k between 1 and 1024; both inclusive // Milestone 2.1: Using the POC code to Support all the values const std::vector<WarpTopKInputs<float> > inputs2_0 = { {0.00000001, 2, 1024, 256,1234ULL} }; const std::vector<WarpTopKInputs<float> > inputs2_1 = { {0.00000001, 4, 2048, 1024,1234ULL} }; const std::vector<WarpTopKInputs<float> > inputs2_2 = { {0.00000001, 4, 2048, 1,1234ULL} }; // Milestone 2.2: Using the full thread queue and warp queue code to support all the values // @TODO: Milestone 3: Support not sorted // @TODO: Milestone 4: Support multi-gpu //Define the function TEST_P typedef WarpTopKTest<float> TestD2_0; typedef WarpTopKTest<float> TestD2_1; typedef WarpTopKTest<float> TestD2_2; TEST_P(TestD2_0, Result){ const static bool Greater=true; ASSERT_TRUE((checkResult<float, int,Greater>(arr, outv, outk, params.rows, params.cols,params.k,params.tolerance))); } TEST_P(TestD2_1, Result){ const static bool Greater=true; ASSERT_TRUE((checkResult<float, int,Greater>(arr, outv, outk, params.rows, params.cols,params.k,params.tolerance))); } TEST_P(TestD2_2, Result){ const static bool Greater=true; ASSERT_TRUE((checkResult<float, int,Greater>(arr, outv, outk, params.rows, params.cols,params.k,params.tolerance))); } // Instantiate INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0, ::testing::ValuesIn(inputs2_0)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1, 
::testing::ValuesIn(inputs2_1)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2, ::testing::ValuesIn(inputs2_2)); } // end namespace Selection } // end namespace MLCommon
4b91eda560490d3c993111f005e0a600476534ca.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __global__ void register_usage_test(int * results, int size) { int gid = blockDim.x * blockIdx.x + threadIdx.x; int x1 = 3465; int x2 = 1768; int x3 = 453; /* int x7 = 3465; int x5 = 1768; int x6 = 453; */ int x4 = x1 + x2 + x3; // + x7 + x5 + x6; if (gid < size) { results[gid] = x4; } } int main() { int size = 1 << 22; int byte_size = sizeof(int)*size; int * h_ref = (int*) malloc(byte_size); int * d_results; hipMalloc((void**)&d_results, byte_size); hipMemset(d_results, 0, byte_size); dim3 blocks(128); dim3 grid((size+blocks.x-1)/blocks.x); printf("launching the kernel \n"); register_usage_test << <grid,blocks >> > (d_results, size); hipDeviceSynchronize(); hipMemcpy(h_ref, d_results, byte_size, hipMemcpyDeviceToHost); printf("Results have arrived \n"); int sum = 0; for (int i = 0; i < size; i++) { sum += h_ref[i]; } printf("final sum : %d \n",sum); return 0; }
4b91eda560490d3c993111f005e0a600476534ca.cu
#include <stdio.h> #include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void register_usage_test(int * results, int size) { int gid = blockDim.x * blockIdx.x + threadIdx.x; int x1 = 3465; int x2 = 1768; int x3 = 453; /* int x7 = 3465; int x5 = 1768; int x6 = 453; */ int x4 = x1 + x2 + x3; // + x7 + x5 + x6; if (gid < size) { results[gid] = x4; } } int main() { int size = 1 << 22; int byte_size = sizeof(int)*size; int * h_ref = (int*) malloc(byte_size); int * d_results; cudaMalloc((void**)&d_results, byte_size); cudaMemset(d_results, 0, byte_size); dim3 blocks(128); dim3 grid((size+blocks.x-1)/blocks.x); printf("launching the kernel \n"); register_usage_test << <grid,blocks >> > (d_results, size); cudaDeviceSynchronize(); cudaMemcpy(h_ref, d_results, byte_size, cudaMemcpyDeviceToHost); printf("Results have arrived \n"); int sum = 0; for (int i = 0; i < size; i++) { sum += h_ref[i]; } printf("final sum : %d \n",sum); return 0; }
6017974cfecfd28bd631583504b67f0b67d0d5f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Mary Barker Homework 1 Vector addition on GPU to compile: nvcc BarkerHW1_GPU.cu OUTPUTS: N = 100 Time in milliseconds= 0.026000000000000 Last Values are A[99] = 198.000000000000000 B[99] = 99.000000000000000 C[99] = 297.000000000000000 N = 600 Time in milliseconds= 0.027000000000000 Last Values are A[599] = 1198.000000000000000 B[599] = 599.000000000000000 C[599] = 1797.000000000000000 N = 2000 Time in milliseconds= 0.035000000000000 Last Values are A[1999] = 3998.000000000000000 B[1999] = 1999.000000000000000 C[1999] = 5997.000000000000000 */ #include <sys/time.h> #include <stdio.h> //Length of vectors to be added. #define N 100 //if N is greater than dimBlock.x program will break float *A_CPU, *B_CPU, *C_CPU; //CPU pointers float *A_GPU, *B_GPU, *C_GPU; //GPU pointers void AllocateMemory() { //Allocate Device (GPU) Memory, & allocates the value of the specific pointer/array hipMalloc(&A_GPU,N*sizeof(float)); hipMalloc(&B_GPU,N*sizeof(float)); hipMalloc(&C_GPU,N*sizeof(float)); //Allocate Host (CPU) Memory A_CPU = (float*)malloc(N*sizeof(float)); B_CPU = (float*)malloc(N*sizeof(float)); C_CPU = (float*)malloc(N*sizeof(float)); } //Loads values into vectors that we will add. void Innitialize() { int i; for(i = 0; i < N; i++) { A_CPU[i] = (float)2*i; B_CPU[i] = (float)i; } } //Cleaning up memory after we are finished. void CleanUp(float *A_CPU,float *B_CPU,float *C_CPU,float *A_GPU,float *B_GPU,float *C_GPU) //free { free(A_CPU); free(B_CPU); free(C_CPU); hipFree(A_GPU); hipFree(B_GPU); hipFree(C_GPU); } //This is the kernel. It is the function that will run on the GPU. //It adds vectors A and B then stores result in vector C __global__ void Addition(float *A, float *B, float *C, int n) { int id = blockIdx.x; if(id < n) C[id] = A[id] + B[id]; } int main() { int i; timeval start, end; //Partitioning off the memory that you will be using. 
AllocateMemory(); //Loading up values to be added. Innitialize(); //Starting the timer gettimeofday(&start, NULL); //Copy Memory from CPU to GPU hipMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), hipMemcpyHostToDevice); //Calling the Kernel (GPU) function. hipLaunchKernelGGL(( Addition), dim3(dim3(N)), dim3(1), 0, 0, A_GPU, B_GPU, C_GPU, N); //Copy Memory from GPU to CPU hipMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), hipMemcpyDeviceToHost); //Stopping the timer gettimeofday(&end, NULL); //Calculating the total time used in the addition and converting it to milliseconds. float time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); //Displaying the time printf("Time in milliseconds= %.15f\n", (time/1000.0)); // Displaying vector info you will want to comment out the vector print line when your //vector becomes big. This is just to make sure everything is running correctly. for(i = 0; i < N; i++) { //printf("A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", i, A_CPU[i], i, B_CPU[i], i, C_CPU[i]); } //Displaying the last value of the addition for a check when all vector display has been commented out. printf("Last Values are A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", N-1, A_CPU[N-1], N-1, B_CPU[N-1], N-1, C_CPU[N-1]); //You're done so cleanup your mess. CleanUp(A_CPU,B_CPU,C_CPU,A_GPU,B_GPU,C_GPU); return(0); }
6017974cfecfd28bd631583504b67f0b67d0d5f5.cu
/* Mary Barker Homework 1 Vector addition on GPU to compile: nvcc BarkerHW1_GPU.cu OUTPUTS: N = 100 Time in milliseconds= 0.026000000000000 Last Values are A[99] = 198.000000000000000 B[99] = 99.000000000000000 C[99] = 297.000000000000000 N = 600 Time in milliseconds= 0.027000000000000 Last Values are A[599] = 1198.000000000000000 B[599] = 599.000000000000000 C[599] = 1797.000000000000000 N = 2000 Time in milliseconds= 0.035000000000000 Last Values are A[1999] = 3998.000000000000000 B[1999] = 1999.000000000000000 C[1999] = 5997.000000000000000 */ #include <sys/time.h> #include <stdio.h> //Length of vectors to be added. #define N 100 //if N is greater than dimBlock.x program will break float *A_CPU, *B_CPU, *C_CPU; //CPU pointers float *A_GPU, *B_GPU, *C_GPU; //GPU pointers void AllocateMemory() { //Allocate Device (GPU) Memory, & allocates the value of the specific pointer/array cudaMalloc(&A_GPU,N*sizeof(float)); cudaMalloc(&B_GPU,N*sizeof(float)); cudaMalloc(&C_GPU,N*sizeof(float)); //Allocate Host (CPU) Memory A_CPU = (float*)malloc(N*sizeof(float)); B_CPU = (float*)malloc(N*sizeof(float)); C_CPU = (float*)malloc(N*sizeof(float)); } //Loads values into vectors that we will add. void Innitialize() { int i; for(i = 0; i < N; i++) { A_CPU[i] = (float)2*i; B_CPU[i] = (float)i; } } //Cleaning up memory after we are finished. void CleanUp(float *A_CPU,float *B_CPU,float *C_CPU,float *A_GPU,float *B_GPU,float *C_GPU) //free { free(A_CPU); free(B_CPU); free(C_CPU); cudaFree(A_GPU); cudaFree(B_GPU); cudaFree(C_GPU); } //This is the kernel. It is the function that will run on the GPU. //It adds vectors A and B then stores result in vector C __global__ void Addition(float *A, float *B, float *C, int n) { int id = blockIdx.x; if(id < n) C[id] = A[id] + B[id]; } int main() { int i; timeval start, end; //Partitioning off the memory that you will be using. AllocateMemory(); //Loading up values to be added. 
Innitialize(); //Starting the timer gettimeofday(&start, NULL); //Copy Memory from CPU to GPU cudaMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice); //Calling the Kernel (GPU) function. Addition<<<dim3(N), 1>>>(A_GPU, B_GPU, C_GPU, N); //Copy Memory from GPU to CPU cudaMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), cudaMemcpyDeviceToHost); //Stopping the timer gettimeofday(&end, NULL); //Calculating the total time used in the addition and converting it to milliseconds. float time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); //Displaying the time printf("Time in milliseconds= %.15f\n", (time/1000.0)); // Displaying vector info you will want to comment out the vector print line when your //vector becomes big. This is just to make sure everything is running correctly. for(i = 0; i < N; i++) { //printf("A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", i, A_CPU[i], i, B_CPU[i], i, C_CPU[i]); } //Displaying the last value of the addition for a check when all vector display has been commented out. printf("Last Values are A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", N-1, A_CPU[N-1], N-1, B_CPU[N-1], N-1, C_CPU[N-1]); //You're done so cleanup your mess. CleanUp(A_CPU,B_CPU,C_CPU,A_GPU,B_GPU,C_GPU); return(0); }
cd0efe74d8cde5ce9c0c09b2f27d3abe0974707f.hip
// !!! This is a file automatically generated by hipify!!! // // Created by Zhuohang Lai on 4/7/15. // Copyright (c) 2015 Zhuohang Lai. All rights reserved. // #include <iostream> #include "util/utility.cuh" #include "cuda_base.cuh" #include "params.h" #include "CUDAStat.cuh" #include <hipcub/hipcub.hpp> using namespace std; /* Test CUB scan */ bool test_scan(uint64_t len, CUDATimeStat *timing) { log_info("----------- Function: %s -----------", __FUNCTION__); log_info("Data cardinality=%d (%.1f MB)", len, 1.0*len* sizeof(int)/1024/1024); bool res = true; float ave_time = 0.0f; int *h_in_gpu = new int[len]; int *h_in_cpu = new int[len]; #pragma omp parallel for for(int i = 0; i < len; i++) { h_in_gpu[i] = 1; h_in_cpu[i] = 1; } int *d_in; checkCudaErrors(hipMalloc((void**)&d_in,sizeof(int)*len)); hipMemcpy(d_in, h_in_gpu, sizeof(int) * len, hipMemcpyHostToDevice); hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); for(int e = 0; e < EXPERIMENT_TIMES; e++) { float cur_time; int *d_out; checkCudaErrors(hipMalloc((void**)&d_out,sizeof(int)*len)); // Allocate temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipEventRecord(start, 0); hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, len); checkCudaErrors(hipMalloc(&d_temp_storage,temp_storage_bytes)); hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, len); hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&cur_time, start, end); if(e==0) { //check hipMemcpy(h_in_gpu, d_out, sizeof(int) * len, hipMemcpyDeviceToHost); for(int i = 0; i < len; i++) { if (h_in_gpu[i] != i) { res = false; } } } else if (res == true) { ave_time += cur_time; } else { log_error("Wrong results"); res = false; break; } checkCudaErrors(hipFree(d_out)); checkCudaErrors(hipFree(d_temp_storage)); } ave_time/= (EXPERIMENT_TIMES-1); checkCudaErrors(hipFree(d_in)); delete[] h_in_gpu; delete[] h_in_cpu; log_info("Time=%.1f ms, 
throughput=%.1f GB/s", ave_time, compute_bandwidth(len, sizeof(int), ave_time)); return res; } int main(int argc, char *argv[]) { hipSetDevice(DEVICE_ID); CUDATimeStat timing; for(int scale = 10; scale <= 30; scale++) { uint64_t num = pow(2,scale); assert(test_scan(num, &timing)); } return 0; }
cd0efe74d8cde5ce9c0c09b2f27d3abe0974707f.cu
// // Created by Zhuohang Lai on 4/7/15. // Copyright (c) 2015 Zhuohang Lai. All rights reserved. // #include <iostream> #include "util/utility.cuh" #include "cuda_base.cuh" #include "params.h" #include "CUDAStat.cuh" #include <cub/cub.cuh> using namespace std; /* Test CUB scan */ bool test_scan(uint64_t len, CUDATimeStat *timing) { log_info("----------- Function: %s -----------", __FUNCTION__); log_info("Data cardinality=%d (%.1f MB)", len, 1.0*len* sizeof(int)/1024/1024); bool res = true; float ave_time = 0.0f; int *h_in_gpu = new int[len]; int *h_in_cpu = new int[len]; #pragma omp parallel for for(int i = 0; i < len; i++) { h_in_gpu[i] = 1; h_in_cpu[i] = 1; } int *d_in; checkCudaErrors(cudaMalloc((void**)&d_in,sizeof(int)*len)); cudaMemcpy(d_in, h_in_gpu, sizeof(int) * len, cudaMemcpyHostToDevice); cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); for(int e = 0; e < EXPERIMENT_TIMES; e++) { float cur_time; int *d_out; checkCudaErrors(cudaMalloc((void**)&d_out,sizeof(int)*len)); // Allocate temporary storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cudaEventRecord(start, 0); cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, len); checkCudaErrors(cudaMalloc(&d_temp_storage,temp_storage_bytes)); cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, len); cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&cur_time, start, end); if(e==0) { //check cudaMemcpy(h_in_gpu, d_out, sizeof(int) * len, cudaMemcpyDeviceToHost); for(int i = 0; i < len; i++) { if (h_in_gpu[i] != i) { res = false; } } } else if (res == true) { ave_time += cur_time; } else { log_error("Wrong results"); res = false; break; } checkCudaErrors(cudaFree(d_out)); checkCudaErrors(cudaFree(d_temp_storage)); } ave_time/= (EXPERIMENT_TIMES-1); checkCudaErrors(cudaFree(d_in)); delete[] h_in_gpu; delete[] h_in_cpu; log_info("Time=%.1f ms, throughput=%.1f GB/s", ave_time, compute_bandwidth(len, 
sizeof(int), ave_time)); return res; } int main(int argc, char *argv[]) { cudaSetDevice(DEVICE_ID); CUDATimeStat timing; for(int scale = 10; scale <= 30; scale++) { uint64_t num = pow(2,scale); assert(test_scan(num, &timing)); } return 0; }
f2a741866a6cea6bce50e8162932275674cf5b3c.hip
// !!! This is a file automatically generated by hipify!!! #include "THHHalf.h" #include <thrust/transform.h> #include <thrust/execution_policy.h> struct __half2floatOp { __device__ float operator()(half v) { return __half2float(v); } }; struct __float2halfOp { __device__ half operator()(float v) { return __float2half(v); } }; void THCFloat2Half(THCState *state, half *out, float *in, long len) { thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par.on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __float2halfOp()); } void THCHalf2Float(THCState *state, float *out, half *in, long len) { thrust::transform( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par.on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __half2floatOp()); } float THC_half2float(half a) { unsigned int bits = a.x & 0x7fff; unsigned int sign = a.x & 0x8000; unsigned int exp = a.x & 0x7c00; bits <<= 13; sign <<= 16; bits += 0x38000000U; // flush denormals to 0 bits = (exp == 0 ? 0 : bits) | sign; union { float f; unsigned int v; } conv; conv.v = bits; return conv.f; } /* Copyright (c) 2015, Norbert Juffa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ half THC_float2half(float a) { uint32_t ia; uint16_t ir; memcpy(&ia, &a, sizeof(float)); ir = (ia >> 16) & 0x8000; if ((ia & 0x7f800000) == 0x7f800000) { if ((ia & 0x7fffffff) == 0x7f800000) { ir |= 0x7c00; /* infinity */ } else { ir = 0x7fff; /* canonical NaN */ } } else if ((ia & 0x7f800000) >= 0x33000000) { int shift = (int)((ia >> 23) & 0xff) - 127; if (shift > 15) { ir |= 0x7c00; /* infinity */ } else { ia = (ia & 0x007fffff) | 0x00800000; /* extract mantissa */ if (shift < -14) { /* denormal */ ir |= ia >> (-1 - shift); ia = ia << (32 - (-1 - shift)); } else { /* normal */ ir |= ia >> (24 - 11); ia = ia << (32 - (24 - 11)); ir = ir + ((14 + shift) << 10); } /* IEEE-754 round to nearest of even */ if ((ia > 0x80000000) || ((ia == 0x80000000) && (ir & 1))) { ir++; } } } half ret; memcpy(&ret, &ir, sizeof(half)); return ret; } THC_EXTERNC int THC_nativeHalfInstructions(THCState *state) { #if CUDA_HALF_INSTRUCTIONS hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state); // CC 5.3+ return (prop->major > 5 || (prop->major == 5 && prop->minor == 3)); #else return false; #endif }
f2a741866a6cea6bce50e8162932275674cf5b3c.cu
#include "THCHalf.h" #include <thrust/transform.h> #include <thrust/execution_policy.h> struct __half2floatOp { __device__ float operator()(half v) { return __half2float(v); } }; struct __float2halfOp { __device__ half operator()(float v) { return __float2half(v); } }; void THCFloat2Half(THCState *state, half *out, float *in, long len) { thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par.on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __float2halfOp()); } void THCHalf2Float(THCState *state, float *out, half *in, long len) { thrust::transform( #if CUDA_VERSION >= 7000 thrust::cuda::par.on(THCState_getCurrentStream(state)), #else thrust::device, #endif in, in + len, out, __half2floatOp()); } float THC_half2float(half a) { unsigned int bits = a.x & 0x7fff; unsigned int sign = a.x & 0x8000; unsigned int exp = a.x & 0x7c00; bits <<= 13; sign <<= 16; bits += 0x38000000U; // flush denormals to 0 bits = (exp == 0 ? 0 : bits) | sign; union { float f; unsigned int v; } conv; conv.v = bits; return conv.f; } /* Copyright (c) 2015, Norbert Juffa All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ half THC_float2half(float a) { uint32_t ia; uint16_t ir; memcpy(&ia, &a, sizeof(float)); ir = (ia >> 16) & 0x8000; if ((ia & 0x7f800000) == 0x7f800000) { if ((ia & 0x7fffffff) == 0x7f800000) { ir |= 0x7c00; /* infinity */ } else { ir = 0x7fff; /* canonical NaN */ } } else if ((ia & 0x7f800000) >= 0x33000000) { int shift = (int)((ia >> 23) & 0xff) - 127; if (shift > 15) { ir |= 0x7c00; /* infinity */ } else { ia = (ia & 0x007fffff) | 0x00800000; /* extract mantissa */ if (shift < -14) { /* denormal */ ir |= ia >> (-1 - shift); ia = ia << (32 - (-1 - shift)); } else { /* normal */ ir |= ia >> (24 - 11); ia = ia << (32 - (24 - 11)); ir = ir + ((14 + shift) << 10); } /* IEEE-754 round to nearest of even */ if ((ia > 0x80000000) || ((ia == 0x80000000) && (ir & 1))) { ir++; } } } half ret; memcpy(&ret, &ir, sizeof(half)); return ret; } THC_EXTERNC int THC_nativeHalfInstructions(THCState *state) { #if CUDA_HALF_INSTRUCTIONS cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state); // CC 5.3+ return (prop->major > 5 || (prop->major == 5 && prop->minor == 3)); #else return false; #endif }
85996bce1c30cf98836c8e94780807c424aba36b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include "hip/hip_runtime_api.h" #include <iostream> #include <fstream> #include <iomanip> #include <windows.h> #include <io.h> #include <stdio.h> #include<conio.h> #include <cstdlib> #include "cstdlib" #include <process.h> #include <stdlib.h> #include <malloc.h> #include <ctime> using namespace std; #define MEDIAN_DIMENSION 3 // For matrix of 3 x 3. We can Use 5 x 5 , 7 x 7 , 9 x 9...... #define MEDIAN_LENGTH 9 // Shoul be MEDIAN_DIMENSION x MEDIAN_DIMENSION = 3 x 3 #define BLOCK_WIDTH 16 // Should be 8 If matrix is of larger then of 5 x 5 elese error occur as " uses too much shared data " at surround[BLOCK_WIDTH*BLOCK_HEIGHT][MEDIAN_LENGTH] #define BLOCK_HEIGHT 16// Should be 8 If matrix is of larger then of 5 x 5 elese error occur as " uses too much shared data " at surround[BLOCK_WIDTH*BLOCK_HEIGHT][MEDIAN_LENGTH] __global__ void MedianFilter_gpu(unsigned short *Device_ImageData, int Image_Width, int Image_Height) { __shared__ unsigned short surround[BLOCK_WIDTH*BLOCK_HEIGHT][MEDIAN_LENGTH]; int iterator; const int Half_Of_MEDIAN_LENGTH = (MEDIAN_LENGTH / 2) + 1; int StartPoint = MEDIAN_DIMENSION / 2; int EndPoint = StartPoint + 1; const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int tid = threadIdx.y*blockDim.y + threadIdx.x; if (x >= Image_Width || y >= Image_Height) return; //Fill surround with pixel value of Image in Matrix Pettern of MEDIAN_DIMENSION x MEDIAN_DIMENSION if (x == 0 || x == Image_Width - StartPoint || y == 0 || y == Image_Height - StartPoint) { } else { iterator = 0; for (int r = x - StartPoint; r < x + (EndPoint); r++) { for (int c = y - StartPoint; c < y + (EndPoint); c++) { surround[tid][iterator] = *(Device_ImageData + (c*Image_Width) + r); iterator++; } } //Sort the Surround Array to Find Median. 
Use Bubble Short if Matrix oF 3 x 3 Matrix //You can use Insertion commented below to Short Bigger Dimension Matrix //// bubble short // for (int i = 0; i<Half_Of_MEDIAN_LENGTH; ++i) { // Find position of minimum element int min = i; for (int l = i + 1; l<MEDIAN_LENGTH; ++l) if (surround[tid][l] <surround[tid][min]) min = l; // Put found minimum element in its place unsigned short temp = surround[tid][i]; surround[tid][i] = surround[tid][min]; surround[tid][min] = temp; }//bubble short end //////insertion sort start // /*int t,j,i; for ( i = 1 ; i< MEDIAN_LENGTH ; i++) { j = i; while ( j > 0 && surround[tid][j] < surround[tid][j-1]) { t= surround[tid][j]; surround[tid][j]= surround[tid][j-1]; surround[tid][j-1] = t; j--; } }*/ ////insertion sort end *(Device_ImageData + (y*Image_Width) + x) = surround[tid][Half_Of_MEDIAN_LENGTH - 1]; // it will give value of surround[tid][4] as Median Value if use 3 x 3 matrix __syncthreads(); } } int main(int argc, const char** argv) { int dataLength; int p1; unsigned short* Host_ImageData = NULL; ifstream is; // Read File is.open("maxresdefault", ios::binary); // get length of file: is.seekg(0, ios::end); dataLength = is.tellg(); is.seekg(0, ios::beg); Host_ImageData = new unsigned short[dataLength * sizeof(char) / sizeof(unsigned short)]; is.read((char*)Host_ImageData, dataLength); is.close(); int Image_Width = 1580; int Image_Height = 1050; unsigned short *Host_ResultData = (unsigned short *)malloc(dataLength); unsigned short *Device_ImageData = NULL; ///////////////////////////// // As First time hipMalloc take more time for memory alocation, i dont want to cosider this time in my process. 
//So Please Ignore Code For Displaying First CudaMelloc Time clock_t begin = clock(); unsigned short *forFirstCudaMalloc = NULL; hipMalloc((void**)&forFirstCudaMalloc, dataLength * sizeof(unsigned short)); clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; cout << "First CudaMelloc time = " << elapsed_secs << " Second\n"; hipFree(forFirstCudaMalloc); //////////////////////////// //Actual Process Starts From Here clock_t beginOverAll = clock(); // hipMalloc((void**)&Device_ImageData, dataLength * sizeof(unsigned short)); hipMemcpy(Device_ImageData, Host_ImageData, dataLength, hipMemcpyHostToDevice);// copying Host Data To Device Memory For Filtering int x = static_cast<int>(ceilf(static_cast<float>(1580.0) / BLOCK_WIDTH)); int y = static_cast<int>(ceilf(static_cast<float>(1050.0) / BLOCK_HEIGHT)); const dim3 grid(x, y, 1); const dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT, 1); begin = clock(); MedianFilter_gpu << <grid, block >> >(Device_ImageData, Image_Width, Image_Height); hipDeviceSynchronize(); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; cout << "Process time = " << elapsed_secs << " Second\n"; hipMemcpy(Host_ResultData, Device_ImageData, dataLength, hipMemcpyDeviceToHost); // copying Back Device Data To Host Memory To write In file After Filter Done clock_t endOverall = clock(); elapsed_secs = double(endOverall - beginOverAll) / CLOCKS_PER_SEC; cout << "Complete Time = " << elapsed_secs << " Second\n"; ofstream of2; //Write Filtered Image Into File of2.open("D:\\Filtered_Image.raw", ios::binary); of2.write((char*)Host_ResultData, dataLength); of2.close(); cout << "\nEnd of Writing File. Press Any Key To Exit..!!"; hipFree(Device_ImageData); delete Host_ImageData; delete Host_ResultData; getch(); return 0; }
85996bce1c30cf98836c8e94780807c424aba36b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include "cuda_runtime_api.h" #include <iostream> #include <fstream> #include <iomanip> #include <windows.h> #include <io.h> #include <stdio.h> #include<conio.h> #include <cstdlib> #include "cstdlib" #include <process.h> #include <stdlib.h> #include <malloc.h> #include <ctime> using namespace std; #define MEDIAN_DIMENSION 3 // For matrix of 3 x 3. We can Use 5 x 5 , 7 x 7 , 9 x 9...... #define MEDIAN_LENGTH 9 // Shoul be MEDIAN_DIMENSION x MEDIAN_DIMENSION = 3 x 3 #define BLOCK_WIDTH 16 // Should be 8 If matrix is of larger then of 5 x 5 elese error occur as " uses too much shared data " at surround[BLOCK_WIDTH*BLOCK_HEIGHT][MEDIAN_LENGTH] #define BLOCK_HEIGHT 16// Should be 8 If matrix is of larger then of 5 x 5 elese error occur as " uses too much shared data " at surround[BLOCK_WIDTH*BLOCK_HEIGHT][MEDIAN_LENGTH] __global__ void MedianFilter_gpu(unsigned short *Device_ImageData, int Image_Width, int Image_Height) { __shared__ unsigned short surround[BLOCK_WIDTH*BLOCK_HEIGHT][MEDIAN_LENGTH]; int iterator; const int Half_Of_MEDIAN_LENGTH = (MEDIAN_LENGTH / 2) + 1; int StartPoint = MEDIAN_DIMENSION / 2; int EndPoint = StartPoint + 1; const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int tid = threadIdx.y*blockDim.y + threadIdx.x; if (x >= Image_Width || y >= Image_Height) return; //Fill surround with pixel value of Image in Matrix Pettern of MEDIAN_DIMENSION x MEDIAN_DIMENSION if (x == 0 || x == Image_Width - StartPoint || y == 0 || y == Image_Height - StartPoint) { } else { iterator = 0; for (int r = x - StartPoint; r < x + (EndPoint); r++) { for (int c = y - StartPoint; c < y + (EndPoint); c++) { surround[tid][iterator] = *(Device_ImageData + (c*Image_Width) + r); iterator++; } } //Sort the Surround Array to Find Median. 
Use Bubble Short if Matrix oF 3 x 3 Matrix //You can use Insertion commented below to Short Bigger Dimension Matrix //// bubble short // for (int i = 0; i<Half_Of_MEDIAN_LENGTH; ++i) { // Find position of minimum element int min = i; for (int l = i + 1; l<MEDIAN_LENGTH; ++l) if (surround[tid][l] <surround[tid][min]) min = l; // Put found minimum element in its place unsigned short temp = surround[tid][i]; surround[tid][i] = surround[tid][min]; surround[tid][min] = temp; }//bubble short end //////insertion sort start // /*int t,j,i; for ( i = 1 ; i< MEDIAN_LENGTH ; i++) { j = i; while ( j > 0 && surround[tid][j] < surround[tid][j-1]) { t= surround[tid][j]; surround[tid][j]= surround[tid][j-1]; surround[tid][j-1] = t; j--; } }*/ ////insertion sort end *(Device_ImageData + (y*Image_Width) + x) = surround[tid][Half_Of_MEDIAN_LENGTH - 1]; // it will give value of surround[tid][4] as Median Value if use 3 x 3 matrix __syncthreads(); } } int main(int argc, const char** argv) { int dataLength; int p1; unsigned short* Host_ImageData = NULL; ifstream is; // Read File is.open("maxresdefault", ios::binary); // get length of file: is.seekg(0, ios::end); dataLength = is.tellg(); is.seekg(0, ios::beg); Host_ImageData = new unsigned short[dataLength * sizeof(char) / sizeof(unsigned short)]; is.read((char*)Host_ImageData, dataLength); is.close(); int Image_Width = 1580; int Image_Height = 1050; unsigned short *Host_ResultData = (unsigned short *)malloc(dataLength); unsigned short *Device_ImageData = NULL; ///////////////////////////// // As First time cudaMalloc take more time for memory alocation, i dont want to cosider this time in my process. 
//So Please Ignore Code For Displaying First CudaMelloc Time clock_t begin = clock(); unsigned short *forFirstCudaMalloc = NULL; cudaMalloc((void**)&forFirstCudaMalloc, dataLength * sizeof(unsigned short)); clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; cout << "First CudaMelloc time = " << elapsed_secs << " Second\n"; cudaFree(forFirstCudaMalloc); //////////////////////////// //Actual Process Starts From Here clock_t beginOverAll = clock(); // cudaMalloc((void**)&Device_ImageData, dataLength * sizeof(unsigned short)); cudaMemcpy(Device_ImageData, Host_ImageData, dataLength, cudaMemcpyHostToDevice);// copying Host Data To Device Memory For Filtering int x = static_cast<int>(ceilf(static_cast<float>(1580.0) / BLOCK_WIDTH)); int y = static_cast<int>(ceilf(static_cast<float>(1050.0) / BLOCK_HEIGHT)); const dim3 grid(x, y, 1); const dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT, 1); begin = clock(); MedianFilter_gpu << <grid, block >> >(Device_ImageData, Image_Width, Image_Height); cudaDeviceSynchronize(); end = clock(); elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; cout << "Process time = " << elapsed_secs << " Second\n"; cudaMemcpy(Host_ResultData, Device_ImageData, dataLength, cudaMemcpyDeviceToHost); // copying Back Device Data To Host Memory To write In file After Filter Done clock_t endOverall = clock(); elapsed_secs = double(endOverall - beginOverAll) / CLOCKS_PER_SEC; cout << "Complete Time = " << elapsed_secs << " Second\n"; ofstream of2; //Write Filtered Image Into File of2.open("D:\\Filtered_Image.raw", ios::binary); of2.write((char*)Host_ResultData, dataLength); of2.close(); cout << "\nEnd of Writing File. Press Any Key To Exit..!!"; cudaFree(Device_ImageData); delete Host_ImageData; delete Host_ResultData; getch(); return 0; }
25a9f9c443ec9e6670b3ac9186de96dadcc93a47.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "fsc_tomo_cmp_kernal.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *data1 = NULL; hipMalloc(&data1, XSIZE*YSIZE); const float *data2 = NULL; hipMalloc(&data2, XSIZE*YSIZE); float *device_soln = NULL; hipMalloc(&device_soln, XSIZE*YSIZE); const float data1threshold = 1; const float data2threshold = 1; const int nx = 1; const int ny = 1; const int nz = 1; const int offset = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( fsc_tomo_cmp_kernal), dim3(gridBlock),dim3(threadBlock), 0, 0, data1,data2,device_soln,data1threshold,data2threshold,nx,ny,nz,offset); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( fsc_tomo_cmp_kernal), dim3(gridBlock),dim3(threadBlock), 0, 0, data1,data2,device_soln,data1threshold,data2threshold,nx,ny,nz,offset); } auto start = steady_clock::now(); for (int loop_counter = 0; 
loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( fsc_tomo_cmp_kernal), dim3(gridBlock),dim3(threadBlock), 0, 0, data1,data2,device_soln,data1threshold,data2threshold,nx,ny,nz,offset); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
25a9f9c443ec9e6670b3ac9186de96dadcc93a47.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "fsc_tomo_cmp_kernal.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *data1 = NULL; cudaMalloc(&data1, XSIZE*YSIZE); const float *data2 = NULL; cudaMalloc(&data2, XSIZE*YSIZE); float *device_soln = NULL; cudaMalloc(&device_soln, XSIZE*YSIZE); const float data1threshold = 1; const float data2threshold = 1; const int nx = 1; const int ny = 1; const int nz = 1; const int offset = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); fsc_tomo_cmp_kernal<<<gridBlock,threadBlock>>>(data1,data2,device_soln,data1threshold,data2threshold,nx,ny,nz,offset); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { fsc_tomo_cmp_kernal<<<gridBlock,threadBlock>>>(data1,data2,device_soln,data1threshold,data2threshold,nx,ny,nz,offset); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { 
fsc_tomo_cmp_kernal<<<gridBlock,threadBlock>>>(data1,data2,device_soln,data1threshold,data2threshold,nx,ny,nz,offset); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9ddb72d846d098ab0d44c92ddec471f9f15be9b7.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* GPU OPTIMIZED MONTE CARLO (GOMC) 2.75 Copyright (C) 2022 GOMC Group A copy of the MIT License can be found in License.txt along with this program, also can be found at <https://opensource.org/licenses/MIT>. ********************************************************************************/ #include "CUDAMemoryManager.cuh" #ifdef GOMC_CUDA long long CUDAMemoryManager::totalAllocatedBytes = 0; std::unordered_map<void *, std::pair<unsigned int, std::string> > CUDAMemoryManager::allocatedPointers; hipError_t CUDAMemoryManager::mallocMemory(void **address, unsigned int size, std::string var_name) { hipError_t ret = hipMalloc(address, size); allocatedPointers[*address] = make_pair(size, var_name); totalAllocatedBytes += size; if (size == 0) { std::cout << "Warning! You are trying to allocate " << var_name << " with a size of zero bytes!\n"; } return ret; } hipError_t CUDAMemoryManager::freeMemory(void *address, std::string var_name) { if(allocatedPointers.find(address) != allocatedPointers.end()) { totalAllocatedBytes -= allocatedPointers[address].first; allocatedPointers.erase(address); } else if (address != nullptr) { std::cout << "Warning! You are trying to free " << var_name << " but it has already been freed\n" << "\tor was never allocated!\n"; } return hipFree(address); } bool CUDAMemoryManager::isFreed() { bool ret = allocatedPointers.size() == 0; while(allocatedPointers.size() != 0) { auto it = allocatedPointers.begin(); std::cout << "You forgot to free memory " << it->second.second << " with " << it->second.first << " bytes allocated!\n"; std::cout << "I am going to free it for you!\n"; freeMemory(it->first, it->second.second); } return ret; } #endif
9ddb72d846d098ab0d44c92ddec471f9f15be9b7.cu
/******************************************************************************* GPU OPTIMIZED MONTE CARLO (GOMC) 2.75 Copyright (C) 2022 GOMC Group A copy of the MIT License can be found in License.txt along with this program, also can be found at <https://opensource.org/licenses/MIT>. ********************************************************************************/ #include "CUDAMemoryManager.cuh" #ifdef GOMC_CUDA long long CUDAMemoryManager::totalAllocatedBytes = 0; std::unordered_map<void *, std::pair<unsigned int, std::string> > CUDAMemoryManager::allocatedPointers; cudaError_t CUDAMemoryManager::mallocMemory(void **address, unsigned int size, std::string var_name) { cudaError_t ret = cudaMalloc(address, size); allocatedPointers[*address] = make_pair(size, var_name); totalAllocatedBytes += size; if (size == 0) { std::cout << "Warning! You are trying to allocate " << var_name << " with a size of zero bytes!\n"; } return ret; } cudaError_t CUDAMemoryManager::freeMemory(void *address, std::string var_name) { if(allocatedPointers.find(address) != allocatedPointers.end()) { totalAllocatedBytes -= allocatedPointers[address].first; allocatedPointers.erase(address); } else if (address != nullptr) { std::cout << "Warning! You are trying to free " << var_name << " but it has already been freed\n" << "\tor was never allocated!\n"; } return cudaFree(address); } bool CUDAMemoryManager::isFreed() { bool ret = allocatedPointers.size() == 0; while(allocatedPointers.size() != 0) { auto it = allocatedPointers.begin(); std::cout << "You forgot to free memory " << it->second.second << " with " << it->second.first << " bytes allocated!\n"; std::cout << "I am going to free it for you!\n"; freeMemory(it->first, it->second.second); } return ret; } #endif
b2cda51a691946d42b65dea944d4adda246a3df6.hip
// !!! This is a file automatically generated by hipify!!! /* This code has the assumption that the source vertices are sorted in the input file Also, the vertices are 0 indexed */ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 void safe_call(hipError_t ret, int line) { if(ret!=hipSuccess) { printf("Error at line %d : %s\n",line,hipGetErrorString(ret)); exit(-1); } } typedef struct __graph { int V; int *adj_prefix_sum; int *adj; } graph_t; __device__ bool d_over; __global__ void reset() { d_over = false; } // Print the graph __global__ void temp_kernel(graph_t * graph) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id == 0) { int j; for(j=0; j<graph->adj_prefix_sum[graph->V-1]; j++) printf("%d ",graph->adj[j]); printf("\n"); } } __global__ void init(int * vertices, int starting_vertex, int num_vertices) { int v = blockDim.x*blockIdx.x + threadIdx.x; if (v==starting_vertex) vertices[v] = 0; else if(v < num_vertices) vertices[v] = -1; } __global__ void bfs(const graph_t * graph, int * vertices, int current_depth) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < graph->V) { if(vertices[id] == current_depth) { int i; if(id == 0) i = 0; else i = graph->adj_prefix_sum[id-1]; for(; i < graph->adj_prefix_sum[id]; i++) { if(vertices[graph->adj[i]] == -1) { vertices[graph->adj[i]] = current_depth+1; d_over = true; } } } } } int main(int argc, char * argv[]) { static char * filename; if(argc>2) { printf("./a.out <filename>\n"); exit(-1); } else if(argc==2) { filename = argv[1]; } else { filename = "../data/input.txt"; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(hipSetDevice(0)); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j; fscanf(fp,"%d %d",&num_vertices,&num_edges); graph_t *graph_host; CUDA_SAFE_CALL(hipHostMalloc((void 
**)&graph_host, sizeof(graph_t))); graph_host->V = num_vertices; CUDA_SAFE_CALL(hipHostMalloc((void **)&(graph_host->adj_prefix_sum), num_vertices*sizeof(int))); CUDA_SAFE_CALL(hipHostMalloc((void **)&(graph_host->adj), num_edges*sizeof(int *))); /* for(i=0; i<num_vertices; i++) { int edges_per_vertex; fscanf(fp,"%d",&edges_per_vertex); if(i>0) { graph_host->adj_prefix_sum[i] = graph_host->adj_prefix_sum[i-1]+edges_per_vertex; j = graph_host->adj_prefix_sum[i-1]; } else { graph_host->adj_prefix_sum[i] = edges_per_vertex; j = 0; } for(; j<graph_host->adj_prefix_sum[i]; j++) { fscanf(fp,"%d",&graph_host->adj[j]); } } */ /* It has been assumed that the source vertices are in sorted order */ int * temp_adj = (int *) malloc(num_vertices*sizeof(int)); int s,d,c=0,ps=0,jt; for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); if(ps == s) { temp_adj[c] = d; c++; } else { //printf("%d %d %d\n",i,ps,s); if(ps>0) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]+c; j = graph_host->adj_prefix_sum[ps-1]; } else { graph_host->adj_prefix_sum[ps] = c; j = 0; } jt = j; for(; j<graph_host->adj_prefix_sum[ps]; j++) { graph_host->adj[j] = temp_adj[j-jt]; } temp_adj[0] = d; c=1; while((++ps)<s) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]; } } } if(ps>0) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]+c; j = graph_host->adj_prefix_sum[ps-1]; } else { graph_host->adj_prefix_sum[ps] = c; j = 0; } jt = j; for(; j<graph_host->adj_prefix_sum[ps]; j++) { graph_host->adj[j] = temp_adj[j-jt]; } while((++ps)<num_vertices) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]; } /***************************************************** XXX: GPU does not know the size of each adjacency list. For that, a new struct containing size of list and list has to be created and passed to GPU memory. Too much hassle. OR Create 1-D array in the graph itself which contains the size of each list. 
*****************************************************/ //temp_kernel<<<1,1>>>(graph_host); int num_of_blocks = 1; int num_of_threads_per_block = num_vertices; if(num_vertices>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_vertices/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } int * vertices_host; CUDA_SAFE_CALL(hipHostMalloc((void **)&vertices_host, num_vertices*sizeof(int))); dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); hipEvent_t start,end; float diff; double time = 0; CUDA_SAFE_CALL(hipEventCreate(&start)); CUDA_SAFE_CALL(hipEventCreate(&end)); hipLaunchKernelGGL(( init), dim3(grid),dim3(threads), 0, 0, vertices_host, 0, num_vertices); bool stop; int k=0; do { stop = false; CUDA_SAFE_CALL(hipMemcpyToSymbol(d_over, &stop, sizeof(bool),0, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUDA_SAFE_CALL(hipEventRecord(start,0)); hipLaunchKernelGGL(( bfs), dim3(grid), dim3(threads), 0, 0, graph_host, vertices_host, k); CUDA_SAFE_CALL(hipDeviceSynchronize()); CUDA_SAFE_CALL(hipEventRecord(end,0)); CUDA_SAFE_CALL(hipEventSynchronize(end)); CUDA_SAFE_CALL(hipEventElapsedTime(&diff, start, end)); time += diff*1.0e-3; CUDA_SAFE_CALL(hipMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, hipMemcpyDeviceToHost)); k++; }while(stop); printf("Number of iterations : %d\n",k); for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,vertices_host[i]); } printf("Time: %f ms\n",time); CUDA_SAFE_CALL(hipHostFree(vertices_host)); CUDA_SAFE_CALL(hipHostFree(graph_host->adj)); CUDA_SAFE_CALL(hipHostFree(graph_host->adj_prefix_sum)); CUDA_SAFE_CALL(hipHostFree(graph_host)); CUDA_SAFE_CALL(hipEventDestroy(start)); CUDA_SAFE_CALL(hipEventDestroy(end)); return 0; }
b2cda51a691946d42b65dea944d4adda246a3df6.cu
/* This code has the assumption that the source vertices are sorted in the input file Also, the vertices are 0 indexed */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__)) #define MAX_THREADS_PER_BLOCK 1024 void safe_call(cudaError_t ret, int line) { if(ret!=cudaSuccess) { printf("Error at line %d : %s\n",line,cudaGetErrorString(ret)); exit(-1); } } typedef struct __graph { int V; int *adj_prefix_sum; int *adj; } graph_t; __device__ bool d_over; __global__ void reset() { d_over = false; } // Print the graph __global__ void temp_kernel(graph_t * graph) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id == 0) { int j; for(j=0; j<graph->adj_prefix_sum[graph->V-1]; j++) printf("%d ",graph->adj[j]); printf("\n"); } } __global__ void init(int * vertices, int starting_vertex, int num_vertices) { int v = blockDim.x*blockIdx.x + threadIdx.x; if (v==starting_vertex) vertices[v] = 0; else if(v < num_vertices) vertices[v] = -1; } __global__ void bfs(const graph_t * graph, int * vertices, int current_depth) { int id = blockDim.x*blockIdx.x + threadIdx.x; if(id < graph->V) { if(vertices[id] == current_depth) { int i; if(id == 0) i = 0; else i = graph->adj_prefix_sum[id-1]; for(; i < graph->adj_prefix_sum[id]; i++) { if(vertices[graph->adj[i]] == -1) { vertices[graph->adj[i]] = current_depth+1; d_over = true; } } } } } int main(int argc, char * argv[]) { static char * filename; if(argc>2) { printf("./a.out <filename>\n"); exit(-1); } else if(argc==2) { filename = argv[1]; } else { filename = "../data/input.txt"; } FILE * fp = fopen(filename,"r"); if(!fp) { printf("Error reading file.\n"); exit(-1); } /* Set cuda device to K40 */ CUDA_SAFE_CALL(cudaSetDevice(0)); /* Get graph from file into CPU memory */ int num_vertices, num_edges, i, j; fscanf(fp,"%d %d",&num_vertices,&num_edges); graph_t *graph_host; CUDA_SAFE_CALL(cudaMallocHost((void **)&graph_host, sizeof(graph_t))); graph_host->V = num_vertices; 
CUDA_SAFE_CALL(cudaMallocHost((void **)&(graph_host->adj_prefix_sum), num_vertices*sizeof(int))); CUDA_SAFE_CALL(cudaMallocHost((void **)&(graph_host->adj), num_edges*sizeof(int *))); /* for(i=0; i<num_vertices; i++) { int edges_per_vertex; fscanf(fp,"%d",&edges_per_vertex); if(i>0) { graph_host->adj_prefix_sum[i] = graph_host->adj_prefix_sum[i-1]+edges_per_vertex; j = graph_host->adj_prefix_sum[i-1]; } else { graph_host->adj_prefix_sum[i] = edges_per_vertex; j = 0; } for(; j<graph_host->adj_prefix_sum[i]; j++) { fscanf(fp,"%d",&graph_host->adj[j]); } } */ /* It has been assumed that the source vertices are in sorted order */ int * temp_adj = (int *) malloc(num_vertices*sizeof(int)); int s,d,c=0,ps=0,jt; for(i=0; i<num_edges; i++) { fscanf(fp,"%d",&s); fscanf(fp,"%d",&d); if(ps == s) { temp_adj[c] = d; c++; } else { //printf("%d %d %d\n",i,ps,s); if(ps>0) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]+c; j = graph_host->adj_prefix_sum[ps-1]; } else { graph_host->adj_prefix_sum[ps] = c; j = 0; } jt = j; for(; j<graph_host->adj_prefix_sum[ps]; j++) { graph_host->adj[j] = temp_adj[j-jt]; } temp_adj[0] = d; c=1; while((++ps)<s) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]; } } } if(ps>0) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]+c; j = graph_host->adj_prefix_sum[ps-1]; } else { graph_host->adj_prefix_sum[ps] = c; j = 0; } jt = j; for(; j<graph_host->adj_prefix_sum[ps]; j++) { graph_host->adj[j] = temp_adj[j-jt]; } while((++ps)<num_vertices) { graph_host->adj_prefix_sum[ps] = graph_host->adj_prefix_sum[ps-1]; } /***************************************************** XXX: GPU does not know the size of each adjacency list. For that, a new struct containing size of list and list has to be created and passed to GPU memory. Too much hassle. OR Create 1-D array in the graph itself which contains the size of each list. 
*****************************************************/ //temp_kernel<<<1,1>>>(graph_host); int num_of_blocks = 1; int num_of_threads_per_block = num_vertices; if(num_vertices>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_vertices/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } int * vertices_host; CUDA_SAFE_CALL(cudaMallocHost((void **)&vertices_host, num_vertices*sizeof(int))); dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); cudaEvent_t start,end; float diff; double time = 0; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&end)); init<<<grid,threads>>> (vertices_host, 0, num_vertices); bool stop; int k=0; do { stop = false; CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_over, &stop, sizeof(bool),0, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaDeviceSynchronize()); CUDA_SAFE_CALL(cudaEventRecord(start,0)); bfs<<<grid, threads>>> (graph_host, vertices_host, k); CUDA_SAFE_CALL(cudaDeviceSynchronize()); CUDA_SAFE_CALL(cudaEventRecord(end,0)); CUDA_SAFE_CALL(cudaEventSynchronize(end)); CUDA_SAFE_CALL(cudaEventElapsedTime(&diff, start, end)); time += diff*1.0e-3; CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, cudaMemcpyDeviceToHost)); k++; }while(stop); printf("Number of iterations : %d\n",k); for(int i = 0; i < num_vertices; i++) { printf("Vertex %d Distance %d\n",i,vertices_host[i]); } printf("Time: %f ms\n",time); CUDA_SAFE_CALL(cudaFreeHost(vertices_host)); CUDA_SAFE_CALL(cudaFreeHost(graph_host->adj)); CUDA_SAFE_CALL(cudaFreeHost(graph_host->adj_prefix_sum)); CUDA_SAFE_CALL(cudaFreeHost(graph_host)); CUDA_SAFE_CALL(cudaEventDestroy(start)); CUDA_SAFE_CALL(cudaEventDestroy(end)); return 0; }
7b051534f5c0ce4b1e9d49400790ab73754b6165.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> c, Tue Aug 30 09:38:34 2016 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = MAGMA_C_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. 
int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = MAGMA_C_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /***************************************************************************//** Purpose ------- CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_csymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { hipLaunchKernelGGL(( csymmetrize_tiles_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } else { hipLaunchKernelGGL(( csymmetrize_tiles_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dA, ldda, mstride, nstride ); } }
7b051534f5c0ce4b1e9d49400790ab73754b6165.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zsymmetrize_tiles.cu, normal z -> c, Tue Aug 30 09:38:34 2016 @author Mark Gates */ #include "magma_internal.h" #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ceil(m/NB) x ntile. Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void csymmetrize_tiles_lower( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = MAGMA_C_CONJ(*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void csymmetrize_tiles_upper( int m, magmaFloatComplex *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.y*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.x*NB + threadIdx.x; magmaFloatComplex *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; magmaFloatComplex *dAend = dA + i*ldda; while( dA < dAend ) { *dA = MAGMA_C_CONJ(*dAT); // lower := upper dA += ldda; dAT += 1; } } } /***************************************************************************//** Purpose ------- CSYMMETRIZE_TILES copies lower triangle to upper triangle, or vice-versa, to make some blocks of dA into general representations of a symmetric block. 
This processes NTILE blocks, typically the diagonal blocks. Each block is offset by mstride rows and nstride columns from the previous block. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix dA that is valid on input. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] m INTEGER The number of rows & columns of each square block of dA. M >= 0. @param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix dA. N = m + nstride*(ntile-1). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1, m + mstride*(ntile-1)). @param[in] ntile INTEGER Number of blocks to symmetrize. ntile >= 0. @param[in] mstride INTEGER Row offset from start of one block to start of next block. mstride >= 0. Either (mstride >= m) or (nstride >= m), to prevent m-by-m tiles from overlapping. @param[in] nstride INTEGER Column offset from start of one block to start of next block. nstride >= 0. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_symmetrize_batched *******************************************************************************/ extern "C" void magmablas_csymmetrize_tiles_q( magma_uplo_t uplo, magma_int_t m, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride, magma_queue_t queue ) { magma_int_t info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) info = -1; else if ( m < 0 ) info = -2; else if ( ldda < max(1,m + mstride*(ntile-1)) ) info = -5; else if ( ntile < 0 ) info = -6; else if ( mstride < 0 ) info = -7; else if ( nstride < 0 ) info = -8; else if ( mstride < m && nstride < m ) // only one must be >= m. 
info = -7; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || ntile == 0 ) return; dim3 threads( NB, 1 ); dim3 grid( magma_ceildiv( m, NB ), ntile ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( uplo == MagmaUpper ) { csymmetrize_tiles_upper <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } else { csymmetrize_tiles_lower <<< grid, threads, 0, queue->cuda_stream() >>> ( m, dA, ldda, mstride, nstride ); } }
d1aef48a56989394fd068b315fc3bd3ee3582c99.hip
// !!! This is a file automatically generated by hipify!!! /* * PAVLE - Parallel Variable-Length Encoder for CUDA. Main file. * * Copyright (C) 2009 Ana Balevic <ana.balevic@gmail.com> * All rights reserved. * * This program is free software; you can redistribute it and/or modify it under the terms of the * MIT License. Read the full licence: http://www.opensource.org/licenses/mit-license.php * * If you find this program useful, please contact me and reference PAVLE home page in your work. * */ #include "stdafx.h" #include <hip/hip_runtime.h> #include "cuda_helpers.h" #include "print_helpers.h" #include "comparison_helpers.h" #include "stats_logger.h" #include "load_data.h" #include <sys/time.h> //#include "vlc_kernel_gm32.cu" //#include "vlc_kernel_sm32.cu" #include "vlc_kernel_sm64huff.cu" //#include "vlc_kernel_dpt.cu" //#include "vlc_kernel_dptt.cu" //#include "scan_kernel.cu" #include "scan.hip" #include "pack_kernels.cu" #include "cpuencode.h" long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } void runVLCTest(char *file_name, uint num_block_threads, uint num_blocks=1); extern "C" void cpu_vlc_encode(unsigned int* indata, unsigned int num_elements, unsigned int* outdata, unsigned int *outsize, unsigned int *codewords, unsigned int* codewordlens); int main(int argc, char* argv[]){ if(!InitCUDA()) { return 0; } unsigned int num_block_threads = 256; if (argc > 1) for (int i=1; i<argc; i++) runVLCTest(argv[i], num_block_threads); else { runVLCTest(NULL, num_block_threads, 1024); } CUDA_SAFE_CALL(hipDeviceReset()); return 0; } void runVLCTest(char *file_name, uint num_block_threads, uint num_blocks) { printf("CUDA! 
Starting VLC Tests!\n"); unsigned int num_elements; //uint num_elements = num_blocks * num_block_threads; unsigned int mem_size; //uint mem_size = num_elements * sizeof(int); unsigned int symbol_type_size = sizeof(int); //////// LOAD DATA /////////////// double H; // entropy initParams(file_name, num_block_threads, num_blocks, num_elements, mem_size, symbol_type_size); printf("Parameters: num_elements: %d, num_blocks: %d, num_block_threads: %d\n----------------------------\n", num_elements, num_blocks, num_block_threads); ////////LOAD DATA /////////////// uint *sourceData = (uint*) malloc(mem_size); uint *destData = (uint*) malloc(mem_size); uint *crefData = (uint*) malloc(mem_size); uint *codewords = (uint*) malloc(NUM_SYMBOLS*symbol_type_size); uint *codewordlens = (uint*) malloc(NUM_SYMBOLS*symbol_type_size); uint *cw32 = (uint*) malloc(mem_size); uint *cw32len = (uint*) malloc(mem_size); uint *cw32idx = (uint*) malloc(mem_size); uint *cindex2= (uint*) malloc(num_blocks*sizeof(int)); memset(sourceData, 0, mem_size); memset(destData, 0, mem_size); memset(crefData, 0, mem_size); memset(cw32, 0, mem_size); memset(cw32len, 0, mem_size); memset(cw32idx, 0, mem_size); memset(codewords, 0, NUM_SYMBOLS*symbol_type_size); memset(codewordlens, 0, NUM_SYMBOLS*symbol_type_size); memset(cindex2, 0, num_blocks*sizeof(int)); //////// LOAD DATA /////////////// loadData(file_name, sourceData, codewords, codewordlens, num_elements, mem_size, H); //////// LOAD DATA /////////////// unsigned int *d_sourceData, *d_destData, *d_destDataPacked; unsigned int *d_codewords, *d_codewordlens; unsigned int *d_cw32, *d_cw32len, *d_cw32idx, *d_cindex, *d_cindex2; CUDA_SAFE_CALL(hipMalloc((void**) &d_sourceData, mem_size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_destData, mem_size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_destDataPacked, mem_size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_codewords, NUM_SYMBOLS*symbol_type_size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_codewordlens, 
NUM_SYMBOLS*symbol_type_size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_cw32, mem_size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_cw32len, mem_size)); CUDA_SAFE_CALL(hipMalloc((void**) &d_cw32idx, mem_size)); CUDA_SAFE_CALL(hipMalloc((void**)&d_cindex, num_blocks*sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc((void**)&d_cindex2, num_blocks*sizeof(unsigned int))); CUDA_SAFE_CALL(hipMemcpy(d_sourceData, sourceData, mem_size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_codewords, codewords, NUM_SYMBOLS*symbol_type_size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_codewordlens, codewordlens, NUM_SYMBOLS*symbol_type_size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_destData, destData, mem_size, hipMemcpyHostToDevice)); dim3 grid_size(num_blocks,1,1); dim3 block_size(num_block_threads, 1, 1); unsigned int sm_size; unsigned int NT = 10; //number of runs for each execution time //////////////////* CPU ENCODER */////////////////////////////////// unsigned int refbytesize; long long timer = get_time(); cpu_vlc_encode((unsigned int*)sourceData, num_elements, (unsigned int*)crefData, &refbytesize, codewords, codewordlens); float msec = (float)((get_time() - timer)/1000.0); printf("CPU Encoding time (CPU): %f (ms)\n", msec); printf("CPU Encoded to %d [B]\n", refbytesize); unsigned int num_ints = refbytesize/4 + ((refbytesize%4 ==0)?0:1); //////////////////* END CPU */////////////////////////////////// //////////////////* SM64HUFF KERNEL */////////////////////////////////// grid_size.x = num_blocks; block_size.x = num_block_threads; sm_size = block_size.x*sizeof(unsigned int); #ifdef CACHECWLUT sm_size = 2*NUM_SYMBOLS*sizeof(int) + block_size.x*sizeof(unsigned int); #endif hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord( start, 0 ); for (int i=0; i<NT; i++) { hipLaunchKernelGGL(( vlc_encode_kernel_sm64huff), dim3(grid_size), dim3(block_size), sm_size, 0, d_sourceData, d_codewords, d_codewordlens, #ifdef TESTING d_cw32, 
d_cw32len, d_cw32idx, #endif d_destData, d_cindex); //testedOK2 } hipDeviceSynchronize(); hipEventRecord( stop, 0 ) ; hipEventSynchronize( stop ) ; float elapsedTime; hipEventElapsedTime( &elapsedTime, start, stop ) ; CUT_CHECK_ERROR("Kernel execution failed\n"); printf("GPU Encoding time (SM64HUFF): %f (ms)\n", elapsedTime/NT); //////////////////* END KERNEL */////////////////////////////////// #ifdef TESTING unsigned int num_scan_elements = grid_size.x; preallocBlockSums(num_scan_elements); hipMemset(d_destDataPacked, 0, mem_size); printf("Num_blocks to be passed to scan is %d.\n", num_scan_elements); prescanArray(d_cindex2, d_cindex, num_scan_elements); hipLaunchKernelGGL(( pack2), dim3(num_scan_elements/16), dim3(16), 0, 0, (unsigned int*)d_destData, d_cindex, d_cindex2, (unsigned int*)d_destDataPacked, num_elements/num_scan_elements); CUT_CHECK_ERROR("Pack2 Kernel execution failed\n"); deallocBlockSums(); CUDA_SAFE_CALL(hipMemcpy(destData, d_destDataPacked, mem_size, hipMemcpyDeviceToHost)); compare_vectors((unsigned int*)crefData, (unsigned int*)destData, num_ints); #endif free(sourceData); free(destData); free(codewords); free(codewordlens); free(cw32); free(cw32len); free(crefData); CUDA_SAFE_CALL(hipFree(d_sourceData)); CUDA_SAFE_CALL(hipFree(d_destData)); CUDA_SAFE_CALL(hipFree(d_destDataPacked)); CUDA_SAFE_CALL(hipFree(d_codewords)); CUDA_SAFE_CALL(hipFree(d_codewordlens)); CUDA_SAFE_CALL(hipFree(d_cw32)); CUDA_SAFE_CALL(hipFree(d_cw32len)); CUDA_SAFE_CALL(hipFree(d_cw32idx)); CUDA_SAFE_CALL(hipFree(d_cindex)); CUDA_SAFE_CALL(hipFree(d_cindex2)); free(cindex2); }
d1aef48a56989394fd068b315fc3bd3ee3582c99.cu
/* * PAVLE - Parallel Variable-Length Encoder for CUDA. Main file. * * Copyright (C) 2009 Ana Balevic <ana.balevic@gmail.com> * All rights reserved. * * This program is free software; you can redistribute it and/or modify it under the terms of the * MIT License. Read the full licence: http://www.opensource.org/licenses/mit-license.php * * If you find this program useful, please contact me and reference PAVLE home page in your work. * */ #include "stdafx.h" #include <cuda_runtime.h> #include "cuda_helpers.h" #include "print_helpers.h" #include "comparison_helpers.h" #include "stats_logger.h" #include "load_data.h" #include <sys/time.h> //#include "vlc_kernel_gm32.cu" //#include "vlc_kernel_sm32.cu" #include "vlc_kernel_sm64huff.cu" //#include "vlc_kernel_dpt.cu" //#include "vlc_kernel_dptt.cu" //#include "scan_kernel.cu" #include "scan.cu" #include "pack_kernels.cu" #include "cpuencode.h" long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } void runVLCTest(char *file_name, uint num_block_threads, uint num_blocks=1); extern "C" void cpu_vlc_encode(unsigned int* indata, unsigned int num_elements, unsigned int* outdata, unsigned int *outsize, unsigned int *codewords, unsigned int* codewordlens); int main(int argc, char* argv[]){ if(!InitCUDA()) { return 0; } unsigned int num_block_threads = 256; if (argc > 1) for (int i=1; i<argc; i++) runVLCTest(argv[i], num_block_threads); else { runVLCTest(NULL, num_block_threads, 1024); } CUDA_SAFE_CALL(cudaThreadExit()); return 0; } void runVLCTest(char *file_name, uint num_block_threads, uint num_blocks) { printf("CUDA! 
Starting VLC Tests!\n"); unsigned int num_elements; //uint num_elements = num_blocks * num_block_threads; unsigned int mem_size; //uint mem_size = num_elements * sizeof(int); unsigned int symbol_type_size = sizeof(int); //////// LOAD DATA /////////////// double H; // entropy initParams(file_name, num_block_threads, num_blocks, num_elements, mem_size, symbol_type_size); printf("Parameters: num_elements: %d, num_blocks: %d, num_block_threads: %d\n----------------------------\n", num_elements, num_blocks, num_block_threads); ////////LOAD DATA /////////////// uint *sourceData = (uint*) malloc(mem_size); uint *destData = (uint*) malloc(mem_size); uint *crefData = (uint*) malloc(mem_size); uint *codewords = (uint*) malloc(NUM_SYMBOLS*symbol_type_size); uint *codewordlens = (uint*) malloc(NUM_SYMBOLS*symbol_type_size); uint *cw32 = (uint*) malloc(mem_size); uint *cw32len = (uint*) malloc(mem_size); uint *cw32idx = (uint*) malloc(mem_size); uint *cindex2= (uint*) malloc(num_blocks*sizeof(int)); memset(sourceData, 0, mem_size); memset(destData, 0, mem_size); memset(crefData, 0, mem_size); memset(cw32, 0, mem_size); memset(cw32len, 0, mem_size); memset(cw32idx, 0, mem_size); memset(codewords, 0, NUM_SYMBOLS*symbol_type_size); memset(codewordlens, 0, NUM_SYMBOLS*symbol_type_size); memset(cindex2, 0, num_blocks*sizeof(int)); //////// LOAD DATA /////////////// loadData(file_name, sourceData, codewords, codewordlens, num_elements, mem_size, H); //////// LOAD DATA /////////////// unsigned int *d_sourceData, *d_destData, *d_destDataPacked; unsigned int *d_codewords, *d_codewordlens; unsigned int *d_cw32, *d_cw32len, *d_cw32idx, *d_cindex, *d_cindex2; CUDA_SAFE_CALL(cudaMalloc((void**) &d_sourceData, mem_size)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_destData, mem_size)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_destDataPacked, mem_size)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_codewords, NUM_SYMBOLS*symbol_type_size)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_codewordlens, 
NUM_SYMBOLS*symbol_type_size)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_cw32, mem_size)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_cw32len, mem_size)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_cw32idx, mem_size)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_cindex, num_blocks*sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMalloc((void**)&d_cindex2, num_blocks*sizeof(unsigned int))); CUDA_SAFE_CALL(cudaMemcpy(d_sourceData, sourceData, mem_size, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_codewords, codewords, NUM_SYMBOLS*symbol_type_size, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_codewordlens, codewordlens, NUM_SYMBOLS*symbol_type_size, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_destData, destData, mem_size, cudaMemcpyHostToDevice)); dim3 grid_size(num_blocks,1,1); dim3 block_size(num_block_threads, 1, 1); unsigned int sm_size; unsigned int NT = 10; //number of runs for each execution time //////////////////* CPU ENCODER */////////////////////////////////// unsigned int refbytesize; long long timer = get_time(); cpu_vlc_encode((unsigned int*)sourceData, num_elements, (unsigned int*)crefData, &refbytesize, codewords, codewordlens); float msec = (float)((get_time() - timer)/1000.0); printf("CPU Encoding time (CPU): %f (ms)\n", msec); printf("CPU Encoded to %d [B]\n", refbytesize); unsigned int num_ints = refbytesize/4 + ((refbytesize%4 ==0)?0:1); //////////////////* END CPU */////////////////////////////////// //////////////////* SM64HUFF KERNEL */////////////////////////////////// grid_size.x = num_blocks; block_size.x = num_block_threads; sm_size = block_size.x*sizeof(unsigned int); #ifdef CACHECWLUT sm_size = 2*NUM_SYMBOLS*sizeof(int) + block_size.x*sizeof(unsigned int); #endif cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord( start, 0 ); for (int i=0; i<NT; i++) { vlc_encode_kernel_sm64huff<<<grid_size, block_size, sm_size>>>(d_sourceData, d_codewords, d_codewordlens, #ifdef TESTING d_cw32, d_cw32len, 
d_cw32idx, #endif d_destData, d_cindex); //testedOK2 } cudaThreadSynchronize(); cudaEventRecord( stop, 0 ) ; cudaEventSynchronize( stop ) ; float elapsedTime; cudaEventElapsedTime( &elapsedTime, start, stop ) ; CUT_CHECK_ERROR("Kernel execution failed\n"); printf("GPU Encoding time (SM64HUFF): %f (ms)\n", elapsedTime/NT); //////////////////* END KERNEL */////////////////////////////////// #ifdef TESTING unsigned int num_scan_elements = grid_size.x; preallocBlockSums(num_scan_elements); cudaMemset(d_destDataPacked, 0, mem_size); printf("Num_blocks to be passed to scan is %d.\n", num_scan_elements); prescanArray(d_cindex2, d_cindex, num_scan_elements); pack2<<< num_scan_elements/16, 16>>>((unsigned int*)d_destData, d_cindex, d_cindex2, (unsigned int*)d_destDataPacked, num_elements/num_scan_elements); CUT_CHECK_ERROR("Pack2 Kernel execution failed\n"); deallocBlockSums(); CUDA_SAFE_CALL(cudaMemcpy(destData, d_destDataPacked, mem_size, cudaMemcpyDeviceToHost)); compare_vectors((unsigned int*)crefData, (unsigned int*)destData, num_ints); #endif free(sourceData); free(destData); free(codewords); free(codewordlens); free(cw32); free(cw32len); free(crefData); CUDA_SAFE_CALL(cudaFree(d_sourceData)); CUDA_SAFE_CALL(cudaFree(d_destData)); CUDA_SAFE_CALL(cudaFree(d_destDataPacked)); CUDA_SAFE_CALL(cudaFree(d_codewords)); CUDA_SAFE_CALL(cudaFree(d_codewordlens)); CUDA_SAFE_CALL(cudaFree(d_cw32)); CUDA_SAFE_CALL(cudaFree(d_cw32len)); CUDA_SAFE_CALL(cudaFree(d_cw32idx)); CUDA_SAFE_CALL(cudaFree(d_cindex)); CUDA_SAFE_CALL(cudaFree(d_cindex2)); free(cindex2); }
a13341149bb0d34300a28d999b382d701d7ff2f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> // We assume that NUM_ELEMENTS is divisible by BLOCK_SIZE #define RADIUS 3 #define BLOCK_SIZE 256 //#define NUM_ELEMENTS (4096*2)*20 #define NUM_ELEMENTS 1e7 // CUDA API error checking macro static void handleError(hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), file, line ); exit(EXIT_FAILURE); } } #define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ )) __global__ void stencil_1d(int *in, int *out) { // blockDim is 3-dimensional vector storing block grid dimensions // index of a thread across all threads + RADIUS int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS; int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += in[gindex + offset]; // Store the result out[gindex - RADIUS] = result; } int main() { unsigned int i; // vectors stored in the CPU memory - can be used from host code only // int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS]; //size_t size = sizeof(int) * (NUM_ELEMENTS + 2 * RADIUS ); int *h_in = (int*)malloc( sizeof(int) * (NUM_ELEMENTS + 2 * RADIUS ) ); int *h_out = (int*)malloc( sizeof(int) * NUM_ELEMENTS ); // vectors that will be stored in the device memory - can be dereferenced // only in kernel code int *d_in, *d_out; // Initialize host data for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i ) h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7 // Allocate space on the device // hipMalloc is equivalent of malloc cudaCheck( hipMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) ); cudaCheck( hipMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) ); // Copy input data to device cudaCheck( hipMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), hipMemcpyHostToDevice) ); // Call kernels clock_t start = clock(); hipLaunchKernelGGL(( stencil_1d), dim3((NUM_ELEMENTS 
+ BLOCK_SIZE - 1)/BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_in, d_out); printf("CZAS: %d", clock() - start); // Check errors from launching the kernel cudaCheck(hipPeekAtLastError()); // Copy results from device memory to host cudaCheck( hipMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), hipMemcpyDeviceToHost) ); // Verify every out value is 7 for( i = 0; i < NUM_ELEMENTS; ++i ) if (h_out[i] != 7) { printf("Element h_out[%d] == %d != 7\n", i, h_out[i]); break; } if (i == NUM_ELEMENTS) printf("SUCCESS!\n"); free(h_in); free(h_out); // Free out memory hipFree(d_in); hipFree(d_out); return 0; }
a13341149bb0d34300a28d999b382d701d7ff2f3.cu
#include <stdio.h> #include <time.h> // We assume that NUM_ELEMENTS is divisible by BLOCK_SIZE #define RADIUS 3 #define BLOCK_SIZE 256 //#define NUM_ELEMENTS (4096*2)*20 #define NUM_ELEMENTS 1e7 // CUDA API error checking macro static void handleError(cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line ); exit(EXIT_FAILURE); } } #define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ )) __global__ void stencil_1d(int *in, int *out) { // blockDim is 3-dimensional vector storing block grid dimensions // index of a thread across all threads + RADIUS int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS; int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += in[gindex + offset]; // Store the result out[gindex - RADIUS] = result; } int main() { unsigned int i; // vectors stored in the CPU memory - can be used from host code only // int h_in[NUM_ELEMENTS + 2 * RADIUS], h_out[NUM_ELEMENTS]; //size_t size = sizeof(int) * (NUM_ELEMENTS + 2 * RADIUS ); int *h_in = (int*)malloc( sizeof(int) * (NUM_ELEMENTS + 2 * RADIUS ) ); int *h_out = (int*)malloc( sizeof(int) * NUM_ELEMENTS ); // vectors that will be stored in the device memory - can be dereferenced // only in kernel code int *d_in, *d_out; // Initialize host data for( i = 0; i < (NUM_ELEMENTS + 2*RADIUS); ++i ) h_in[i] = 1; // With a value of 1 and RADIUS of 3, all output values should be 7 // Allocate space on the device // cudaMalloc is equivalent of malloc cudaCheck( cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int)) ); cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) ); // Copy input data to device cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice) ); // Call kernels clock_t start = clock(); stencil_1d<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out); printf("CZAS: %d", clock() - start); // 
Check errors from launching the kernel cudaCheck(cudaPeekAtLastError()); // Copy results from device memory to host cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) ); // Verify every out value is 7 for( i = 0; i < NUM_ELEMENTS; ++i ) if (h_out[i] != 7) { printf("Element h_out[%d] == %d != 7\n", i, h_out[i]); break; } if (i == NUM_ELEMENTS) printf("SUCCESS!\n"); free(h_in); free(h_out); // Free out memory cudaFree(d_in); cudaFree(d_out); return 0; }
8c68474d4e61f74fb4fd174141dfa627b1756782.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // mxnet-roi-align.cpp // // // Created by xinxueshi on 2017/12/22. // /*! * Copyright (c) 2017 by Contributors * \file roi_align.cu * \brief roi align operator * \author Yuchen Guo, Zehao Shi */ #include "./roi_align-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> namespace mshadow { namespace cuda { template<typename Dtype> __global__ void ROIAlignForwardKernel(const int count, const Dtype* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, Dtype* argmax_x, Dtype* argmax_y) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; if (roi_batch_ind < 0) { top_data[index] = 0; argmax_x[index] = 0; argmax_y[index] = 0; continue; } Dtype roi_start_w = (bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (bottom_rois[4]) * spatial_scale; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype 
hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd Dtype maxidx_x = -1; Dtype maxidx_y = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype h = hstart+h_stride; h <= hend-h_stride+0.01; h += max(h_stride, 0.01)) { for (Dtype w = wstart+w_stride; w <= wend-w_stride+0.01; w += max(w_stride, 0.01)) { int hlow = min(max(static_cast<int>(floor(h)), 0), height-1); int hhigh = min(max(static_cast<int>(ceil(h)), 0), height-1); int wleft = min(max(static_cast<int>(floor(w)), 0), width-1); int wright = min(max(static_cast<int>(ceil(w)), 0), width-1); int topleft = hlow * width + wleft; int topright = hlow * width + wright; int bottomleft = hhigh * width + wleft; int bottomright = hhigh * width + wright; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (h - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? 
static_cast<Dtype>(0.5) : (w - wleft) / (wright - wleft); Dtype value = (1 - alpha) * (1 - beta) * bottom_data[topleft] + alpha * (1 - beta) * bottom_data[bottomleft] + (1 - alpha) * beta * bottom_data[topright] + alpha * beta * bottom_data[bottomright]; if (value > maxval) { maxval = value; maxidx_x = w; maxidx_y = h; } } } top_data[index] = maxval; argmax_x[index] = (Dtype)maxidx_x; argmax_y[index] = (Dtype)maxidx_y; } } template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx_x, const Tensor<gpu, 4, Dtype> &max_idx_y, const float spatial_scale) { const Dtype *bottom_data = data.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *top_data = out.dptr_; Dtype *argmax_x = max_idx_x.dptr_; Dtype *argmax_y = max_idx_y.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward"); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( ROIAlignForwardKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, top_data, argmax_x, argmax_y); } template<typename Dtype> __global__ void ROIAlignBackwardAccKernel(const int count, const Dtype* top_diff, const Dtype* argmax_x, const Dtype* argmax_y, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { for (int index = (blockIdx.x + 
blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype roi_start_w = (offset_bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (offset_bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (offset_bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (offset_bottom_rois[4]) * spatial_scale; // Skip if ROI doesn't include (h, w) const bool in_roi = (w > roi_start_w - 1.0 && w < roi_end_w + 1.0 && h > roi_start_h - 1.0 && h < roi_end_h + 1.0); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const Dtype* offset_argmax_x = argmax_x + offset; const Dtype* offset_argmax_y = argmax_y + offset; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) { const int pool_index = ph * pooled_width + pw; Dtype a_x = offset_argmax_x[pool_index]; Dtype a_y = offset_argmax_y[pool_index]; int hlow = min(max(static_cast<int>(floor(a_y)), 0), height-1); int hhigh = min(max(static_cast<int>(ceil(a_y)), 0), height-1); int wleft = min(max(static_cast<int>(floor(a_x)), 0), width-1); int wright = min(max(static_cast<int>(ceil(a_x)), 0), width-1); if (h != hlow && h != hhigh && w != wleft && w != wright) // (w, h) is not around (a_x, a_y) 
continue; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (a_y - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? static_cast<Dtype>(0.5) : (a_x - wleft) / (wright - wleft); if (h == hlow && w == wleft) gradient += offset_top_diff[pool_index] * (1 - alpha) * (1 - beta); else if (h == hlow && w == wright) gradient += offset_top_diff[pool_index] * (1 - alpha) * beta; else if (h == hhigh && w == wleft) gradient += offset_top_diff[pool_index] * alpha * (1 - beta); else if (h == hhigh && w == wright) gradient += offset_top_diff[pool_index] * alpha * beta; } } } bottom_diff[index] += gradient; } } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx_x, const Tensor<gpu, 4, Dtype> &max_idx_y, const float spatial_scale) { const Dtype *top_diff = out_grad.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *bottom_diff = in_grad.dptr_; Dtype *argmax_x = max_idx_x.dptr_; Dtype *argmax_y = max_idx_y.dptr_; const int count = in_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward"); hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); hipLaunchKernelGGL(( ROIAlignBackwardAccKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, count, top_diff, argmax_x, argmax_y, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_diff, bottom_rois); } } // namespace cuda template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, 
Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx_x, const Tensor<gpu, 4, Dtype> &max_idx_y, const float spatial_scale) { cuda::ROIAlignForward(out, data, bbox, max_idx_x, max_idx_y, spatial_scale); } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx_x, const Tensor<gpu, 4, Dtype> &max_idx_y, const float spatial_scale) { cuda::ROIAlignBackwardAcc(in_grad, out_grad, bbox, max_idx_x, max_idx_y, spatial_scale); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(ROIAlignParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new ROIAlignOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
8c68474d4e61f74fb4fd174141dfa627b1756782.cu
// // mxnet-roi-align.cpp // // // Created by xinxueshi on 2017/12/22. // /*! * Copyright (c) 2017 by Contributors * \file roi_align.cu * \brief roi align operator * \author Yuchen Guo, Zehao Shi */ #include "./roi_align-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> namespace mshadow { namespace cuda { template<typename Dtype> __global__ void ROIAlignForwardKernel(const int count, const Dtype* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, Dtype* argmax_x, Dtype* argmax_y) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; if (roi_batch_ind < 0) { top_data[index] = 0; argmax_x[index] = 0; argmax_y[index] = 0; continue; } Dtype roi_start_w = (bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (bottom_rois[4]) * spatial_scale; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 
1) * bin_size_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd Dtype maxidx_x = -1; Dtype maxidx_y = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype h = hstart+h_stride; h <= hend-h_stride+0.01; h += max(h_stride, 0.01)) { for (Dtype w = wstart+w_stride; w <= wend-w_stride+0.01; w += max(w_stride, 0.01)) { int hlow = min(max(static_cast<int>(floor(h)), 0), height-1); int hhigh = min(max(static_cast<int>(ceil(h)), 0), height-1); int wleft = min(max(static_cast<int>(floor(w)), 0), width-1); int wright = min(max(static_cast<int>(ceil(w)), 0), width-1); int topleft = hlow * width + wleft; int topright = hlow * width + wright; int bottomleft = hhigh * width + wleft; int bottomright = hhigh * width + wright; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (h - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? 
static_cast<Dtype>(0.5) : (w - wleft) / (wright - wleft); Dtype value = (1 - alpha) * (1 - beta) * bottom_data[topleft] + alpha * (1 - beta) * bottom_data[bottomleft] + (1 - alpha) * beta * bottom_data[topright] + alpha * beta * bottom_data[bottomright]; if (value > maxval) { maxval = value; maxidx_x = w; maxidx_y = h; } } } top_data[index] = maxval; argmax_x[index] = (Dtype)maxidx_x; argmax_y[index] = (Dtype)maxidx_y; } } template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx_x, const Tensor<gpu, 4, Dtype> &max_idx_y, const float spatial_scale) { const Dtype *bottom_data = data.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *top_data = out.dptr_; Dtype *argmax_x = max_idx_x.dptr_; Dtype *argmax_y = max_idx_y.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward"); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); ROIAlignForwardKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>( count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, top_data, argmax_x, argmax_y); } template<typename Dtype> __global__ void ROIAlignBackwardAccKernel(const int count, const Dtype* top_diff, const Dtype* argmax_x, const Dtype* argmax_y, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * 
blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype roi_start_w = (offset_bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (offset_bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (offset_bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (offset_bottom_rois[4]) * spatial_scale; // Skip if ROI doesn't include (h, w) const bool in_roi = (w > roi_start_w - 1.0 && w < roi_end_w + 1.0 && h > roi_start_h - 1.0 && h < roi_end_h + 1.0); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const Dtype* offset_argmax_x = argmax_x + offset; const Dtype* offset_argmax_y = argmax_y + offset; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) { const int pool_index = ph * pooled_width + pw; Dtype a_x = offset_argmax_x[pool_index]; Dtype a_y = offset_argmax_y[pool_index]; int hlow = min(max(static_cast<int>(floor(a_y)), 0), height-1); int hhigh = min(max(static_cast<int>(ceil(a_y)), 0), height-1); int wleft = min(max(static_cast<int>(floor(a_x)), 0), width-1); int wright = min(max(static_cast<int>(ceil(a_x)), 0), width-1); if (h != hlow && h != hhigh && w != wleft && w != wright) // (w, h) is not around (a_x, a_y) continue; Dtype alpha = (hlow 
== hhigh) ? static_cast<Dtype>(0.5) : (a_y - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? static_cast<Dtype>(0.5) : (a_x - wleft) / (wright - wleft); if (h == hlow && w == wleft) gradient += offset_top_diff[pool_index] * (1 - alpha) * (1 - beta); else if (h == hlow && w == wright) gradient += offset_top_diff[pool_index] * (1 - alpha) * beta; else if (h == hhigh && w == wleft) gradient += offset_top_diff[pool_index] * alpha * (1 - beta); else if (h == hhigh && w == wright) gradient += offset_top_diff[pool_index] * alpha * beta; } } } bottom_diff[index] += gradient; } } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx_x, const Tensor<gpu, 4, Dtype> &max_idx_y, const float spatial_scale) { const Dtype *top_diff = out_grad.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *bottom_diff = in_grad.dptr_; Dtype *argmax_x = max_idx_x.dptr_; Dtype *argmax_y = max_idx_y.dptr_; const int count = in_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward"); cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); ROIAlignBackwardAccKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>( count, top_diff, argmax_x, argmax_y, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_diff, bottom_rois); } } // namespace cuda template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const 
Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx_x, const Tensor<gpu, 4, Dtype> &max_idx_y, const float spatial_scale) { cuda::ROIAlignForward(out, data, bbox, max_idx_x, max_idx_y, spatial_scale); } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx_x, const Tensor<gpu, 4, Dtype> &max_idx_y, const float spatial_scale) { cuda::ROIAlignBackwardAcc(in_grad, out_grad, bbox, max_idx_x, max_idx_y, spatial_scale); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(ROIAlignParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new ROIAlignOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
89ad4aca5a34a2ff1ad0b33e88acdede57b63c6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/operators/math/sequence_padding.h" #include "paddle/phi/backends/gpu/gpu_context.h" namespace paddle { namespace operators { namespace math { template <typename T, CopyType Type> __global__ void SequencePaddingKernel(T* dst, const T* src, const T* pad_value, bool is_constant_pad, const size_t* seq_offsets, const size_t seq_num, const size_t pad_seq_len, const size_t step_width, bool norm_by_len, const PadLayout layout) { size_t seq_idx = blockIdx.y; size_t seq_len = seq_offsets[seq_idx + 1] - seq_offsets[seq_idx]; size_t step_idx = blockIdx.x * blockDim.y + threadIdx.y; size_t seq_data_offset = (seq_offsets[seq_idx] + step_idx) * step_width; size_t pad_data_offset = layout == kBatchLengthWidth ? (seq_idx * pad_seq_len + step_idx) * step_width : (step_idx * seq_num + seq_idx) * step_width; T* dst_data = dst + (Type == kSeqToPad ? pad_data_offset : seq_data_offset); const T* src_data = src + (Type == kSeqToPad ? seq_data_offset : pad_data_offset); if (step_idx < seq_len) { float scale = norm_by_len ? 
(1.0f / static_cast<float>(seq_len)) : 1.0f; for (size_t i = threadIdx.x; i < step_width; i += blockDim.x) { dst_data[i] = scale * src_data[i]; } } else if (step_idx < pad_seq_len && Type == kSeqToPad) { for (size_t i = threadIdx.x; i < step_width; i += blockDim.x) { dst_data[i] = is_constant_pad ? pad_value[0] : pad_value[i]; } } } template <typename T> class PaddingLoDTensorFunctor<phi::GPUContext, T> { public: void operator()(const phi::GPUContext& context, const framework::LoDTensor& seq_tensor, framework::LoDTensor* pad_tensor, const framework::LoDTensor& pad_value, int pad_seq_len = -1, int lod_level = 0, bool norm_by_times = false, const PadLayout layout = kBatchLengthWidth) { auto seq_lod = seq_tensor.lod(); auto seq_offsets = framework::ToAbsOffset(seq_lod)[lod_level]; const auto& seq_tensor_dims = seq_tensor.dims(); const auto& pad_tensor_dims = pad_tensor->dims(); int max_seq_len = MaximumSequenceLength(seq_offsets); if (pad_seq_len == -1) { pad_seq_len = max_seq_len; } PADDLE_ENFORCE_GE( pad_seq_len, max_seq_len, platform::errors::InvalidArgument( "The pad_seq_len must be equal to or greater than the " "original max sequence length. Expected %ld >= %ld, but got %ld < " "%ld. Please check the input value.", pad_seq_len, max_seq_len, pad_seq_len, max_seq_len)); int step_width = seq_tensor.numel() / seq_tensor_dims[0]; int seq_num = seq_offsets.size() - 1; CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len, step_width, layout); PADDLE_ENFORCE_EQ( pad_value.numel() == 1 || pad_value.numel() == step_width, true, platform::errors::InvalidArgument( "The numel of 'pad_value' can only be 1 or be equal to " "the 'step_width', but got %ld != 1 and %ld. Please check the " "input value.", pad_value.numel(), step_width)); const int kBlockSize = 512; /* At least use 32 threads to copy sequence_width elements, * and at least 8 elements for each thread. 
*/ size_t block_dim_x = ::min(((((step_width + 7) >> 3) + 31) >> 5) << 5, kBlockSize); size_t block_dim_y = kBlockSize / block_dim_x; dim3 threads(block_dim_x, block_dim_y); size_t grid_dim_x = (pad_seq_len + block_dim_y - 1) / block_dim_y; size_t grid_dim_y = seq_num; dim3 grid(grid_dim_x, grid_dim_y); const T* seq_data = seq_tensor.data<T>(); T* pad_data = pad_tensor->data<T>(); const T* pad_value_data = pad_value.data<T>(); paddle::framework::MixVector<size_t> mix_vector_seq_offsets(&seq_offsets); hipLaunchKernelGGL(( SequencePaddingKernel<T, kSeqToPad>), dim3(grid), dim3(threads), 0, context.stream(), pad_data, seq_data, pad_value_data, pad_value.numel() == 1, mix_vector_seq_offsets.CUDAData(context.GetPlace()), seq_num, pad_seq_len, step_width, norm_by_times, layout); } }; template <typename T> class UnpaddingLoDTensorFunctor<phi::GPUContext, T> { public: void operator()(const phi::GPUContext& context, const framework::LoDTensor& pad_tensor, framework::LoDTensor* seq_tensor, int pad_seq_len = -1, int lod_level = 0, bool norm_by_times = false, const PadLayout layout = kBatchLengthWidth) { auto seq_offsets = framework::ToAbsOffset(seq_tensor->lod())[lod_level]; const auto& seq_tensor_dims = seq_tensor->dims(); const auto& pad_tensor_dims = pad_tensor.dims(); int max_seq_len = MaximumSequenceLength(seq_offsets); if (pad_seq_len == -1) { pad_seq_len = max_seq_len; } int step_width = seq_tensor->numel() / seq_tensor_dims[0]; int seq_num = seq_offsets.size() - 1; CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len, step_width, layout); /* if (!norm_by_times && seq_num == 1UL && pad_seq_len == max_seq_len) { paddle::framework::TensorCopy(pad_tensor, context.GetPlace(), context, seq_tensor); seq_tensor->Resize(seq_tensor_dims); return; } */ const int kBlockSize = 512; /* At least use 32 threads to copy sequence_width elements, * and at least 8 elements for each thread. 
*/ size_t block_dim_x = ::min(((((step_width + 7) >> 3) + 31) >> 5) << 5, kBlockSize); size_t block_dim_y = kBlockSize / block_dim_x; dim3 threads(block_dim_x, block_dim_y); size_t grid_dim_x = (pad_seq_len + block_dim_y - 1) / block_dim_y; size_t grid_dim_y = seq_num; dim3 grid(grid_dim_x, grid_dim_y); const T* pad_data = pad_tensor.data<T>(); T* seq_data = seq_tensor->data<T>(); paddle::framework::MixVector<size_t> mixv_seq_offsets(&seq_offsets); hipLaunchKernelGGL(( SequencePaddingKernel<T, kPadToSeq>), dim3(grid), dim3(threads), 0, context.stream(), seq_data, pad_data, nullptr, false, mixv_seq_offsets.CUDAData(context.GetPlace()), seq_num, pad_seq_len, step_width, norm_by_times, layout); } }; template class PaddingLoDTensorFunctor<phi::GPUContext, int>; template class PaddingLoDTensorFunctor<phi::GPUContext, int64_t>; template class PaddingLoDTensorFunctor<phi::GPUContext, float>; template class PaddingLoDTensorFunctor<phi::GPUContext, double>; template class UnpaddingLoDTensorFunctor<phi::GPUContext, int>; template class UnpaddingLoDTensorFunctor<phi::GPUContext, int64_t>; template class UnpaddingLoDTensorFunctor<phi::GPUContext, float>; template class UnpaddingLoDTensorFunctor<phi::GPUContext, double>; } // namespace math } // namespace operators } // namespace paddle
89ad4aca5a34a2ff1ad0b33e88acdede57b63c6c.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/operators/math/sequence_padding.h" #include "paddle/phi/backends/gpu/gpu_context.h" namespace paddle { namespace operators { namespace math { template <typename T, CopyType Type> __global__ void SequencePaddingKernel(T* dst, const T* src, const T* pad_value, bool is_constant_pad, const size_t* seq_offsets, const size_t seq_num, const size_t pad_seq_len, const size_t step_width, bool norm_by_len, const PadLayout layout) { size_t seq_idx = blockIdx.y; size_t seq_len = seq_offsets[seq_idx + 1] - seq_offsets[seq_idx]; size_t step_idx = blockIdx.x * blockDim.y + threadIdx.y; size_t seq_data_offset = (seq_offsets[seq_idx] + step_idx) * step_width; size_t pad_data_offset = layout == kBatchLengthWidth ? (seq_idx * pad_seq_len + step_idx) * step_width : (step_idx * seq_num + seq_idx) * step_width; T* dst_data = dst + (Type == kSeqToPad ? pad_data_offset : seq_data_offset); const T* src_data = src + (Type == kSeqToPad ? seq_data_offset : pad_data_offset); if (step_idx < seq_len) { float scale = norm_by_len ? (1.0f / static_cast<float>(seq_len)) : 1.0f; for (size_t i = threadIdx.x; i < step_width; i += blockDim.x) { dst_data[i] = scale * src_data[i]; } } else if (step_idx < pad_seq_len && Type == kSeqToPad) { for (size_t i = threadIdx.x; i < step_width; i += blockDim.x) { dst_data[i] = is_constant_pad ? 
pad_value[0] : pad_value[i]; } } } template <typename T> class PaddingLoDTensorFunctor<phi::GPUContext, T> { public: void operator()(const phi::GPUContext& context, const framework::LoDTensor& seq_tensor, framework::LoDTensor* pad_tensor, const framework::LoDTensor& pad_value, int pad_seq_len = -1, int lod_level = 0, bool norm_by_times = false, const PadLayout layout = kBatchLengthWidth) { auto seq_lod = seq_tensor.lod(); auto seq_offsets = framework::ToAbsOffset(seq_lod)[lod_level]; const auto& seq_tensor_dims = seq_tensor.dims(); const auto& pad_tensor_dims = pad_tensor->dims(); int max_seq_len = MaximumSequenceLength(seq_offsets); if (pad_seq_len == -1) { pad_seq_len = max_seq_len; } PADDLE_ENFORCE_GE( pad_seq_len, max_seq_len, platform::errors::InvalidArgument( "The pad_seq_len must be equal to or greater than the " "original max sequence length. Expected %ld >= %ld, but got %ld < " "%ld. Please check the input value.", pad_seq_len, max_seq_len, pad_seq_len, max_seq_len)); int step_width = seq_tensor.numel() / seq_tensor_dims[0]; int seq_num = seq_offsets.size() - 1; CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len, step_width, layout); PADDLE_ENFORCE_EQ( pad_value.numel() == 1 || pad_value.numel() == step_width, true, platform::errors::InvalidArgument( "The numel of 'pad_value' can only be 1 or be equal to " "the 'step_width', but got %ld != 1 and %ld. Please check the " "input value.", pad_value.numel(), step_width)); const int kBlockSize = 512; /* At least use 32 threads to copy sequence_width elements, * and at least 8 elements for each thread. 
*/ size_t block_dim_x = std::min(((((step_width + 7) >> 3) + 31) >> 5) << 5, kBlockSize); size_t block_dim_y = kBlockSize / block_dim_x; dim3 threads(block_dim_x, block_dim_y); size_t grid_dim_x = (pad_seq_len + block_dim_y - 1) / block_dim_y; size_t grid_dim_y = seq_num; dim3 grid(grid_dim_x, grid_dim_y); const T* seq_data = seq_tensor.data<T>(); T* pad_data = pad_tensor->data<T>(); const T* pad_value_data = pad_value.data<T>(); paddle::framework::MixVector<size_t> mix_vector_seq_offsets(&seq_offsets); SequencePaddingKernel<T, kSeqToPad><<<grid, threads, 0, context.stream()>>>( pad_data, seq_data, pad_value_data, pad_value.numel() == 1, mix_vector_seq_offsets.CUDAData(context.GetPlace()), seq_num, pad_seq_len, step_width, norm_by_times, layout); } }; template <typename T> class UnpaddingLoDTensorFunctor<phi::GPUContext, T> { public: void operator()(const phi::GPUContext& context, const framework::LoDTensor& pad_tensor, framework::LoDTensor* seq_tensor, int pad_seq_len = -1, int lod_level = 0, bool norm_by_times = false, const PadLayout layout = kBatchLengthWidth) { auto seq_offsets = framework::ToAbsOffset(seq_tensor->lod())[lod_level]; const auto& seq_tensor_dims = seq_tensor->dims(); const auto& pad_tensor_dims = pad_tensor.dims(); int max_seq_len = MaximumSequenceLength(seq_offsets); if (pad_seq_len == -1) { pad_seq_len = max_seq_len; } int step_width = seq_tensor->numel() / seq_tensor_dims[0]; int seq_num = seq_offsets.size() - 1; CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len, step_width, layout); /* if (!norm_by_times && seq_num == 1UL && pad_seq_len == max_seq_len) { paddle::framework::TensorCopy(pad_tensor, context.GetPlace(), context, seq_tensor); seq_tensor->Resize(seq_tensor_dims); return; } */ const int kBlockSize = 512; /* At least use 32 threads to copy sequence_width elements, * and at least 8 elements for each thread. 
*/ size_t block_dim_x = std::min(((((step_width + 7) >> 3) + 31) >> 5) << 5, kBlockSize); size_t block_dim_y = kBlockSize / block_dim_x; dim3 threads(block_dim_x, block_dim_y); size_t grid_dim_x = (pad_seq_len + block_dim_y - 1) / block_dim_y; size_t grid_dim_y = seq_num; dim3 grid(grid_dim_x, grid_dim_y); const T* pad_data = pad_tensor.data<T>(); T* seq_data = seq_tensor->data<T>(); paddle::framework::MixVector<size_t> mixv_seq_offsets(&seq_offsets); SequencePaddingKernel<T, kPadToSeq><<<grid, threads, 0, context.stream()>>>( seq_data, pad_data, nullptr, false, mixv_seq_offsets.CUDAData(context.GetPlace()), seq_num, pad_seq_len, step_width, norm_by_times, layout); } }; template class PaddingLoDTensorFunctor<phi::GPUContext, int>; template class PaddingLoDTensorFunctor<phi::GPUContext, int64_t>; template class PaddingLoDTensorFunctor<phi::GPUContext, float>; template class PaddingLoDTensorFunctor<phi::GPUContext, double>; template class UnpaddingLoDTensorFunctor<phi::GPUContext, int>; template class UnpaddingLoDTensorFunctor<phi::GPUContext, int64_t>; template class UnpaddingLoDTensorFunctor<phi::GPUContext, float>; template class UnpaddingLoDTensorFunctor<phi::GPUContext, double>; } // namespace math } // namespace operators } // namespace paddle
83175ca60c2ae8c5af4a7c5e1eb0ecee542679d8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <helper_timer.h> #include "helper_cuda.h" #include <assert.h> #define BLOCK_DIM 16 #define MAX_FILTER_LENGTH 128 #define RESULT_VERIFICATION 1 // change 1 if you want to verify the result __global__ void convolution_kernel_v1(float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; float result = 0.f; for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row) { for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col) { // Find the global position to apply the given filter int image_row = idx_y + filter_row; int image_col = idx_x + filter_col; float image_value = (image_row >= 0 && image_row < num_row && image_col >= 0 && image_col < num_col) ? d_input[image_row * num_col + image_col] : 0.f; float filter_value = d_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2]; result += image_value * filter_value; } } d_output[idx_y * num_col + idx_x] = result; } __constant__ float c_filter[MAX_FILTER_LENGTH * MAX_FILTER_LENGTH]; __global__ void convolution_kernel_v2(float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; float result = 0.f; for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row) { for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col) { int image_row = idx_y + filter_row; int image_col = idx_x + filter_col; float image_value = (image_row >= 0 && image_row < num_row && image_col >= 0 && image_col < num_col) ? 
d_input[image_row * num_col + image_col] : 0.f; float filter_value = c_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2]; result += image_value * filter_value; } } d_output[idx_y * num_col + idx_x] = result; } __global__ void convolution_kernel_v3(float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int pad_size = filter_size / 2; int tile_size = BLOCK_DIM + 2 * pad_size; extern __shared__ float s_input[]; for (int row = 0; row <= tile_size / BLOCK_DIM; row++) { for (int col = 0; col <= tile_size / BLOCK_DIM; col++) { int idx_row = idx_y + BLOCK_DIM * row - pad_size; // input data index row int idx_col = idx_x + BLOCK_DIM * col - pad_size; // input data index column int fid_row = threadIdx.y + BLOCK_DIM * row; // filter index row int fid_col = threadIdx.x + BLOCK_DIM * col; // filter index column if (fid_row >= tile_size || fid_col >= tile_size) continue; s_input[tile_size * fid_row + fid_col] = \ (idx_row >= 0 && idx_row < num_row && idx_col >= 0 && idx_col < num_col) ? 
d_input[num_col * idx_row + idx_col] : 0.f; } } __syncthreads(); /* Tile Debugging */ // if (idx_x == BLOCK_DIM*1 && idx_y == BLOCK_DIM*1) // { // for (int row = 0; row < 2*pad_size + BLOCK_DIM; row++) // { // for (int col = 0; col < 2*pad_size + BLOCK_DIM; col++) // { // printf("%.0f ", s_input[tile_size * row + col]); // } // printf("\n"); // } // } float result = 0.f; for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row) { for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col) { // Find the global position to apply the given filter int image_row = threadIdx.y + pad_size + filter_row; int image_col = threadIdx.x + pad_size + filter_col; float image_value = s_input[tile_size * image_row + image_col]; float filter_value = c_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2]; result += image_value * filter_value; } } d_output[idx_y * num_col + idx_x] = result; } void convolution_gpu(int version, float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size) { dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); dim3 dimGrid((num_col + BLOCK_DIM - 1) / BLOCK_DIM, (num_row + BLOCK_DIM - 1) / BLOCK_DIM); if (version == 1) hipLaunchKernelGGL(( convolution_kernel_v1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_output, d_input, d_filter, num_row, num_col, filter_size); else if (version == 2) hipLaunchKernelGGL(( convolution_kernel_v2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_output, d_input, d_filter, num_row, num_col, filter_size); else // version == 3 { int shared_mem_size = (2*filter_size+BLOCK_DIM) * (2*filter_size+BLOCK_DIM) * sizeof(float); hipLaunchKernelGGL(( convolution_kernel_v3), dim3(dimGrid), dim3(dimBlock), shared_mem_size, 0 , d_output, d_input, d_filter, num_row, num_col, filter_size); } checkCudaErrors(hipGetLastError()); } void convolution_host(float *h_output, float *h_input, float *h_filter, int num_row, int num_col, int filter_size) { //For every 
pixel in the image #pragma omp parallel for (int row = 0; row < (int)num_row; ++row) { for (int col = 0; col < (int)num_col; ++col) { float result = 0.f; //For every value in the filter around the pixel (c, r) for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row) { for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col) { // Find the global image position for this filter position int image_row = row + filter_row; int image_col = col + filter_col; float image_value = (image_row >= 0 && image_row < num_row && image_col >= 0 && image_col < num_col) ? h_input[image_row * num_col + image_col] : 0.f; float filter_value = h_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2]; result += image_value * filter_value; } } h_output[row * num_col + col] = result; } } } /* Generates Bi-symetric Gaussian Filter */ void generate_filter(float *h_filter, int filter_size) { float blur_kernel_sigma = 2.; float sum_filter = 0.f; //for normalization for (int row = -filter_size / 2; row <= filter_size / 2; row++) { for (int col = -filter_size / 2; col <= filter_size / 2; col++) { float filterValue = expf(-(float)(col * col + row * row) / (2.f * blur_kernel_sigma * blur_kernel_sigma)); h_filter[(row + filter_size / 2) * filter_size + col + filter_size / 2] = filterValue; sum_filter += filterValue; } } // normalization float normalizationFactor = 1.f / sum_filter; for (int row = -filter_size / 2; row <= filter_size / 2; row++) for (int col = -filter_size / 2; col <= filter_size / 2; col++) h_filter[(row + filter_size / 2) * filter_size + col + filter_size / 2] *= normalizationFactor; } void generate_data(float *h_buffer, int num_row, int num_col) { for (int row = 0; row < num_row; row++) { for (int col = 0; col < num_col; col++) { // h_buffer[row * num_col + col] = float(rand() & 0xFFFFFF) / RAND_MAX; h_buffer[row * num_col + col] = 1.f; } } } bool value_test(float *a, float *b, int length) { float 
epsilon = 0.000001; bool result = true; for (int i = 0; i < length; i++) if (abs(a[i] - b[i]) >= epsilon) result = false; return result; } int main() { int num_row = 2048; int num_col = 2048; int filter_size = 9; int buf_size = num_row * num_col * sizeof(float); float *h_input, *d_input; float *h_output_host, *h_output_gpu, *d_output; float *h_filter, *d_filter; float elapsed_time_gpu; // initialize timer StopWatchInterface *timer_host, *timer_gpu; sdkCreateTimer(&timer_host); sdkCreateTimer(&timer_gpu); srand(2019); // allocate host memories h_input = (float *)malloc(buf_size); h_output_host = (float *)malloc(buf_size); h_output_gpu = (float *)malloc(buf_size); h_filter = (float *)malloc(filter_size * filter_size * sizeof(float)); // allocate gpu memories hipMalloc((void **)&d_input, buf_size); hipMalloc((void **)&d_output, buf_size); hipMalloc((void **)&d_filter, filter_size * filter_size * sizeof(float)); // generate data generate_data(h_input, num_row, num_col); generate_filter(h_filter, filter_size); // copy input date to gpu hipMemcpy(d_input, h_input, buf_size, hipMemcpyHostToDevice); hipMemcpy(d_filter, h_filter, filter_size * filter_size * sizeof(float), hipMemcpyHostToDevice); hipMemcpyToSymbol(c_filter, h_filter, filter_size * filter_size * sizeof(float)); // processing in GPU sdkStartTimer(&timer_gpu); hipProfilerStart(); convolution_gpu(1, d_output, d_input, d_filter, num_row, num_col, filter_size); hipDeviceSynchronize(); sdkStopTimer(&timer_gpu); elapsed_time_gpu = sdkGetTimerValue(&timer_gpu); printf("Processing Time (1) -> GPU: %.2f ms\n", elapsed_time_gpu); // processing in GPU sdkResetTimer(&timer_gpu); sdkStartTimer(&timer_gpu); convolution_gpu(2, d_output, d_input, d_filter, num_row, num_col, filter_size); hipDeviceSynchronize(); sdkStopTimer(&timer_gpu); elapsed_time_gpu = sdkGetTimerValue(&timer_gpu); printf("Processing Time (2) -> GPU: %.2f ms\n", elapsed_time_gpu); // processing in GPU (3) sdkResetTimer(&timer_gpu); 
sdkStartTimer(&timer_gpu); convolution_gpu(3, d_output, d_input, d_filter, num_row, num_col, filter_size); hipDeviceSynchronize(); sdkStopTimer(&timer_gpu); hipProfilerStop(); elapsed_time_gpu = sdkGetTimerValue(&timer_gpu); printf("Processing Time (3) -> GPU: %.2f ms\n", elapsed_time_gpu); #if (RESULT_VERIFICATION) // processing in CPU sdkStartTimer(&timer_host); convolution_host(h_output_host, h_input, h_filter, num_row, num_col, filter_size); sdkStopTimer(&timer_host); float elapsed_time_host = sdkGetTimerValue(&timer_host); printf("Processing Time -> Host: %.2f ms\n", elapsed_time_host); // compare the result hipMemcpy(h_output_gpu, d_output, buf_size, hipMemcpyDeviceToHost); if (value_test(h_output_host, h_output_gpu, num_row * num_col)) printf("SUCCESS!!\n"); else printf("Error\n"); #endif // finalize free(h_input); free(h_output_host); free(h_output_gpu); free(h_filter); return 0; }
83175ca60c2ae8c5af4a7c5e1eb0ecee542679d8.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <helper_timer.h>
#include "helper_cuda.h"
#include <assert.h>

#define BLOCK_DIM 16
#define MAX_FILTER_LENGTH 128

#define RESULT_VERIFICATION 1 // change 1 if you want to verify the result

// Version 1: naive 2D convolution, filter read from global memory.
// Expects a 2D launch of BLOCK_DIM x BLOCK_DIM blocks covering the image.
__global__ void convolution_kernel_v1(float *d_output, float *d_input, float *d_filter,
                                      int num_row, int num_col, int filter_size)
{
    int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
    int idx_y = blockDim.y * blockIdx.y + threadIdx.y;

    // Guard against out-of-range threads when the image size is not an
    // exact multiple of the block size (previously missing — only worked
    // because 2048 is divisible by BLOCK_DIM).
    if (idx_x >= num_col || idx_y >= num_row)
        return;

    float result = 0.f;
    for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
    {
        for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
        {
            // Find the global position to apply the given filter;
            // out-of-image taps contribute zero (zero padding).
            int image_row = idx_y + filter_row;
            int image_col = idx_x + filter_col;

            float image_value = (image_row >= 0 && image_row < num_row &&
                                 image_col >= 0 && image_col < num_col) ?
                                    d_input[image_row * num_col + image_col] : 0.f;
            float filter_value = d_filter[(filter_row + filter_size / 2) * filter_size +
                                          filter_col + filter_size / 2];

            result += image_value * filter_value;
        }
    }

    d_output[idx_y * num_col + idx_x] = result;
}

// Read-only filter coefficients, broadcast-friendly constant memory.
// Filled from the host via cudaMemcpyToSymbol before version-2/3 launches.
__constant__ float c_filter[MAX_FILTER_LENGTH * MAX_FILTER_LENGTH];

// Version 2: identical to v1 but reads the filter from constant memory
// (d_filter is kept in the signature for interface compatibility).
__global__ void convolution_kernel_v2(float *d_output, float *d_input, float *d_filter,
                                      int num_row, int num_col, int filter_size)
{
    int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
    int idx_y = blockDim.y * blockIdx.y + threadIdx.y;

    // bounds guard (see v1)
    if (idx_x >= num_col || idx_y >= num_row)
        return;

    float result = 0.f;
    for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
    {
        for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
        {
            int image_row = idx_y + filter_row;
            int image_col = idx_x + filter_col;

            float image_value = (image_row >= 0 && image_row < num_row &&
                                 image_col >= 0 && image_col < num_col) ?
                                    d_input[image_row * num_col + image_col] : 0.f;
            float filter_value = c_filter[(filter_row + filter_size / 2) * filter_size +
                                          filter_col + filter_size / 2];

            result += image_value * filter_value;
        }
    }

    d_output[idx_y * num_col + idx_x] = result;
}

// Version 3: stages a (BLOCK_DIM + 2*pad)^2 input tile in dynamic shared
// memory, then convolves against the constant-memory filter.
// Requires tile_size^2 * sizeof(float) bytes of dynamic shared memory.
__global__ void convolution_kernel_v3(float *d_output, float *d_input, float *d_filter,
                                      int num_row, int num_col, int filter_size)
{
    int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
    int idx_y = blockDim.y * blockIdx.y + threadIdx.y;

    int pad_size = filter_size / 2;
    int tile_size = BLOCK_DIM + 2 * pad_size;

    extern __shared__ float s_input[];

    // Cooperative tile load: each thread copies up to
    // ceil(tile/BLOCK_DIM)^2 elements; out-of-image reads become zero.
    for (int row = 0; row <= tile_size / BLOCK_DIM; row++)
    {
        for (int col = 0; col <= tile_size / BLOCK_DIM; col++)
        {
            int idx_row = idx_y + BLOCK_DIM * row - pad_size; // input data index row
            int idx_col = idx_x + BLOCK_DIM * col - pad_size; // input data index column
            int fid_row = threadIdx.y + BLOCK_DIM * row;      // tile index row
            int fid_col = threadIdx.x + BLOCK_DIM * col;      // tile index column

            if (fid_row >= tile_size || fid_col >= tile_size)
                continue;

            s_input[tile_size * fid_row + fid_col] =
                (idx_row >= 0 && idx_row < num_row && idx_col >= 0 && idx_col < num_col) ?
                    d_input[num_col * idx_row + idx_col] : 0.f;
        }
    }

    // All threads (including any out-of-image ones) must reach this barrier,
    // so the bounds guard is applied only to the final store below.
    __syncthreads();

    float result = 0.f;
    for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
    {
        for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
        {
            int image_row = threadIdx.y + pad_size + filter_row;
            int image_col = threadIdx.x + pad_size + filter_col;

            float image_value = s_input[tile_size * image_row + image_col];
            float filter_value = c_filter[(filter_row + filter_size / 2) * filter_size +
                                          filter_col + filter_size / 2];

            result += image_value * filter_value;
        }
    }

    if (idx_x < num_col && idx_y < num_row)
        d_output[idx_y * num_col + idx_x] = result;
}

// Dispatches one of the three kernel versions on a 2D grid covering the
// image; checks the launch with cudaGetLastError().
void convolution_gpu(int version, float *d_output, float *d_input, float *d_filter,
                     int num_row, int num_col, int filter_size)
{
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((num_col + BLOCK_DIM - 1) / BLOCK_DIM,
                 (num_row + BLOCK_DIM - 1) / BLOCK_DIM);

    if (version == 1)
        convolution_kernel_v1<<<dimGrid, dimBlock>>>(d_output, d_input, d_filter,
                                                     num_row, num_col, filter_size);
    else if (version == 2)
        convolution_kernel_v2<<<dimGrid, dimBlock>>>(d_output, d_input, d_filter,
                                                     num_row, num_col, filter_size);
    else // version == 3
    {
        // Exact tile footprint: (BLOCK_DIM + 2*pad)^2 floats.  The previous
        // (2*filter_size + BLOCK_DIM)^2 request over-allocated shared memory.
        int pad_size = filter_size / 2;
        int tile_size = BLOCK_DIM + 2 * pad_size;
        int shared_mem_size = tile_size * tile_size * sizeof(float);
        convolution_kernel_v3<<<dimGrid, dimBlock, shared_mem_size, 0>>>(
            d_output, d_input, d_filter, num_row, num_col, filter_size);
    }

    checkCudaErrors(cudaGetLastError());
}

// CPU reference implementation (zero padding at the borders).
void convolution_host(float *h_output, float *h_input, float *h_filter,
                      int num_row, int num_col, int filter_size)
{
    // For every pixel in the image.  "parallel for" distributes rows across
    // threads; the bare "parallel" used before made every thread run the
    // whole loop redundantly.
#pragma omp parallel for
    for (int row = 0; row < (int)num_row; ++row)
    {
        for (int col = 0; col < (int)num_col; ++col)
        {
            float result = 0.f;
            // For every value in the filter around the pixel (col, row)
            for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
            {
                for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
                {
                    // Find the global image position for this filter position
                    int image_row = row + filter_row;
                    int image_col = col + filter_col;

                    float image_value = (image_row >= 0 && image_row < num_row &&
                                         image_col >= 0 && image_col < num_col) ?
                                            h_input[image_row * num_col + image_col] : 0.f;
                    float filter_value = h_filter[(filter_row + filter_size / 2) * filter_size +
                                                  filter_col + filter_size / 2];

                    result += image_value * filter_value;
                }
            }
            h_output[row * num_col + col] = result;
        }
    }
}

/* Generates a bi-symmetric Gaussian filter, normalized to sum to 1. */
void generate_filter(float *h_filter, int filter_size)
{
    float blur_kernel_sigma = 2.;

    float sum_filter = 0.f; // for normalization
    for (int row = -filter_size / 2; row <= filter_size / 2; row++)
    {
        for (int col = -filter_size / 2; col <= filter_size / 2; col++)
        {
            float filterValue = expf(-(float)(col * col + row * row) /
                                     (2.f * blur_kernel_sigma * blur_kernel_sigma));
            h_filter[(row + filter_size / 2) * filter_size + col + filter_size / 2] = filterValue;
            sum_filter += filterValue;
        }
    }

    // normalization
    float normalizationFactor = 1.f / sum_filter;
    for (int row = -filter_size / 2; row <= filter_size / 2; row++)
        for (int col = -filter_size / 2; col <= filter_size / 2; col++)
            h_filter[(row + filter_size / 2) * filter_size + col + filter_size / 2] *= normalizationFactor;
}

// Fills the input image.  Constant 1.f keeps the expected output easy to
// reason about; the commented alternative produces random data.
void generate_data(float *h_buffer, int num_row, int num_col)
{
    for (int row = 0; row < num_row; row++)
    {
        for (int col = 0; col < num_col; col++)
        {
            // h_buffer[row * num_col + col] = float(rand() & 0xFFFFFF) / RAND_MAX;
            h_buffer[row * num_col + col] = 1.f;
        }
    }
}

// Element-wise comparison with an absolute tolerance.
// Previously used abs(), whose int overload truncates the float difference
// and hides any error smaller than 1.0; fabsf() fixes that.
bool value_test(float *a, float *b, int length)
{
    float epsilon = 0.000001f;
    for (int i = 0; i < length; i++)
        if (fabsf(a[i] - b[i]) >= epsilon)
            return false;
    return true;
}

// Benchmark driver: runs all three GPU variants, verifies against the host
// reference, and releases every resource (device buffers and timers were
// previously leaked).
int main()
{
    int num_row = 2048;
    int num_col = 2048;
    int filter_size = 9;
    int buf_size = num_row * num_col * sizeof(float);

    float *h_input, *d_input;
    float *h_output_host, *h_output_gpu, *d_output;
    float *h_filter, *d_filter;
    float elapsed_time_gpu;

    // initialize timers
    StopWatchInterface *timer_host, *timer_gpu;
    sdkCreateTimer(&timer_host);
    sdkCreateTimer(&timer_gpu);

    srand(2019);

    // allocate host memories
    h_input = (float *)malloc(buf_size);
    h_output_host = (float *)malloc(buf_size);
    h_output_gpu = (float *)malloc(buf_size);
    h_filter = (float *)malloc(filter_size * filter_size * sizeof(float));

    // allocate gpu memories
    cudaMalloc((void **)&d_input, buf_size);
    cudaMalloc((void **)&d_output, buf_size);
    cudaMalloc((void **)&d_filter, filter_size * filter_size * sizeof(float));

    // generate input data and the gaussian filter
    generate_data(h_input, num_row, num_col);
    generate_filter(h_filter, filter_size);

    // copy input data to gpu (global-memory filter and constant-memory copy)
    cudaMemcpy(d_input, h_input, buf_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_filter, h_filter, filter_size * filter_size * sizeof(float),
               cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(c_filter, h_filter, filter_size * filter_size * sizeof(float));

    // version 1: filter read from global memory
    sdkStartTimer(&timer_gpu);
    cudaProfilerStart();
    convolution_gpu(1, d_output, d_input, d_filter, num_row, num_col, filter_size);
    cudaDeviceSynchronize();
    sdkStopTimer(&timer_gpu);
    elapsed_time_gpu = sdkGetTimerValue(&timer_gpu);
    printf("Processing Time (1) -> GPU: %.2f ms\n", elapsed_time_gpu);

    // version 2: filter read from constant memory
    sdkResetTimer(&timer_gpu);
    sdkStartTimer(&timer_gpu);
    convolution_gpu(2, d_output, d_input, d_filter, num_row, num_col, filter_size);
    cudaDeviceSynchronize();
    sdkStopTimer(&timer_gpu);
    elapsed_time_gpu = sdkGetTimerValue(&timer_gpu);
    printf("Processing Time (2) -> GPU: %.2f ms\n", elapsed_time_gpu);

    // version 3: input tile staged in shared memory
    sdkResetTimer(&timer_gpu);
    sdkStartTimer(&timer_gpu);
    convolution_gpu(3, d_output, d_input, d_filter, num_row, num_col, filter_size);
    cudaDeviceSynchronize();
    sdkStopTimer(&timer_gpu);
    cudaProfilerStop();
    elapsed_time_gpu = sdkGetTimerValue(&timer_gpu);
    printf("Processing Time (3) -> GPU: %.2f ms\n", elapsed_time_gpu);

#if (RESULT_VERIFICATION)
    // processing in CPU
    sdkStartTimer(&timer_host);
    convolution_host(h_output_host, h_input, h_filter, num_row, num_col, filter_size);
    sdkStopTimer(&timer_host);
    float elapsed_time_host = sdkGetTimerValue(&timer_host);
    printf("Processing Time -> Host: %.2f ms\n", elapsed_time_host);

    // compare the result (d_output holds the version-3 result at this point)
    cudaMemcpy(h_output_gpu, d_output, buf_size, cudaMemcpyDeviceToHost);
    if (value_test(h_output_host, h_output_gpu, num_row * num_col))
        printf("SUCCESS!!\n");
    else
        printf("Error\n");
#endif

    // finalize: release device buffers and timers (previously leaked),
    // then host buffers.
    cudaFree(d_input);
    cudaFree(d_output);
    cudaFree(d_filter);
    sdkDeleteTimer(&timer_host);
    sdkDeleteTimer(&timer_gpu);
    free(h_input);
    free(h_output_host);
    free(h_output_gpu);
    free(h_filter);
    return 0;
}
d86e3f3a20cb63d374a5073228db4bb60ddf6df8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda_runtime.h>
#include<hiprand/hiprand.h>
#include<hiprand/hiprand_kernel.h>
#include<device_launch_parameters.h>
#include"sde_builder_cuda.h"
#include<cassert>
#include"random_kernel_initializers.cuh"
#include"mc_types.h"
#include"path_collector.h"
#include"one_factor_kernels.h"

namespace fdm_engine_cuda {

using mc_types::FDMScheme;
using mc_types::GPUConfiguration;
using mc_types::PathValuesType;

//=====================================================================
//====== equidistant overloads
//=====================================================================

// NOTE(review): every 1D launch in this file originally read
// <<<threadsPerBlock, blocksPerGrid>>> (and the hipLaunchKernelGGL
// equivalent dim3(threadsPerBlock), dim3(blocksPerGrid)).  The launch
// syntax is <<<gridDim, blockDim>>>, so grid and block were swapped: the
// total thread count happened to coincide, but blockDim = blocksPerGrid
// exceeds the 1024-threads-per-block hardware limit once
// nPaths > 1024 * THREADS_PER_BLOCK, making the launch fail.  Fixed below.

// Generates equidistant GBM paths on a 1D grid, one thread per path.
void fdm_engine_cuda::GBMPathEngineDouble::_generate1D(double *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, double dt) const {
    // initialise RNG states
    const unsigned int threadsPerBlock = THREADS_PER_BLOCK;
    unsigned int blocksPerGrid = (nPaths + threadsPerBlock - 1) / threadsPerBlock;
    random_kernel_initializers::initialiseRandomKernel1D<<<blocksPerGrid, threadsPerBlock>>>(
        time(0), states, nPaths);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        // hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMem, stream, args...)
        hipLaunchKernelGGL((kernels::one_factor_kernels::euler_scheme::generatePathsKernel1D<>),
            dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
            this->gbm_, d_paths, states, nPaths, nSteps, dt);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel1D<>
            <<<blocksPerGrid, threadsPerBlock>>>(this->gbm_, d_paths, states, nPaths, nSteps, dt);
    }
    break;
    }
}

// Equidistant paths on a 2D grid; nPaths must be a multiple of the fixed
// 1000-path width.
void fdm_engine_cuda::GBMPathEngineDouble::_generate2D(double *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, double dt) const {
    const unsigned int widthSize{ 1000 };
    assert((nPaths % widthSize) == 0);
    unsigned int heightSize{ nPaths / widthSize };

    const unsigned int threadsPerBlockX = THREADS_2D_PER_BLOCK_X;
    const unsigned int threadsPerBlockY = THREADS_2D_PER_BLOCK_Y;
    unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX;
    unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY;
    const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY);
    const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY);

    random_kernel_initializers::initialiseRandomKernel2D<<<gridSize, blockSize>>>(
        time(0), states, widthSize, heightSize);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel2D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widentity /*see note*/ == 0 ? widthSize : widthSize, heightSize, nSteps, dt);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel2D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states, widthSize, heightSize, nSteps, dt);
    }
    break;
    }
}

// Equidistant paths on a 3D grid; nPaths must be a multiple of 100 * 100.
void fdm_engine_cuda::GBMPathEngineDouble::_generate3D(double *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, double dt) const {
    const unsigned int widthSize{ 100 };
    const unsigned int heightSize{ 100 };
    assert((nPaths % (widthSize * heightSize)) == 0);
    unsigned int depthSize{ nPaths / (widthSize * heightSize) };

    const unsigned int threadsPerBlockX = THREADS_3D_PER_BLOCK_X;
    const unsigned int threadsPerBlockY = THREADS_3D_PER_BLOCK_Y;
    const unsigned int threadsPerBlockZ = THREADS_3D_PER_BLOCK_Z;
    unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX;
    unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY;
    unsigned int blocksPerGridZ = (depthSize + threadsPerBlockZ - 1) / threadsPerBlockZ;
    const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY, threadsPerBlockZ);
    const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY, blocksPerGridZ);

    random_kernel_initializers::initialiseRandomKernel3D<<<gridSize, blockSize>>>(
        time(0), states, widthSize, heightSize, depthSize);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel3D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, depthSize, nSteps, dt);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel3D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, depthSize, nSteps, dt);
    }
    break;
    }
}

//=====================================================================
//====== non-equidistant overloads
//=====================================================================

// Non-equidistant paths on a 1D grid; time points are read from d_times.
void fdm_engine_cuda::GBMPathEngineDouble::_generate1D(double *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, double const *d_times, unsigned int size) const {
    // initialise RNG states (grid/block order fixed — see note above)
    const unsigned int threadsPerBlock = THREADS_PER_BLOCK;
    unsigned int blocksPerGrid = (nPaths + threadsPerBlock - 1) / threadsPerBlock;
    random_kernel_initializers::initialiseRandomKernel1D<<<blocksPerGrid, threadsPerBlock>>>(
        time(0), states, nPaths);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel1D<>
            <<<blocksPerGrid, threadsPerBlock>>>(this->gbm_, d_paths, states, nPaths, d_times, size);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel1D<>
            <<<blocksPerGrid, threadsPerBlock>>>(this->gbm_, d_paths, states, nPaths, d_times, size);
    }
    break;
    }
}

// Non-equidistant paths on a 2D grid; nPaths must be a multiple of 1000.
void fdm_engine_cuda::GBMPathEngineDouble::_generate2D(double *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, double const *d_times, unsigned int size) const {
    const unsigned int widthSize{ 1000 };
    assert((nPaths % widthSize) == 0);
    unsigned int heightSize{ nPaths / widthSize };

    const unsigned int threadsPerBlockX = THREADS_2D_PER_BLOCK_X;
    const unsigned int threadsPerBlockY = THREADS_2D_PER_BLOCK_Y;
    unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX;
    unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY;
    const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY);
    const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY);

    random_kernel_initializers::initialiseRandomKernel2D<<<gridSize, blockSize>>>(
        time(0), states, widthSize, heightSize);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel2D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, d_times, size);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel2D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, d_times, size);
    }
    break;
    }
}

// Non-equidistant paths on a 3D grid; nPaths must be a multiple of 100 * 100.
void fdm_engine_cuda::GBMPathEngineDouble::_generate3D(double *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, double const *d_times, unsigned int size) const {
    const unsigned int widthSize{ 100 };
    const unsigned int heightSize{ 100 };
    assert((nPaths % (widthSize * heightSize)) == 0);
    unsigned int depthSize{ nPaths / (widthSize * heightSize) };

    const unsigned int threadsPerBlockX = THREADS_3D_PER_BLOCK_X;
    const unsigned int threadsPerBlockY = THREADS_3D_PER_BLOCK_Y;
    const unsigned int threadsPerBlockZ = THREADS_3D_PER_BLOCK_Z;
    unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX;
    unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY;
    unsigned int blocksPerGridZ = (depthSize + threadsPerBlockZ - 1) / threadsPerBlockZ;
    const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY, threadsPerBlockZ);
    const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY, blocksPerGridZ);

    random_kernel_initializers::initialiseRandomKernel3D<<<gridSize, blockSize>>>(
        time(0), states, widthSize, heightSize, depthSize);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel3D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, depthSize, d_times, size);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel3D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, depthSize, d_times, size);
    }
    break;
    }
}

//=====================================================================
//====== equidistant overloads
//=====================================================================

// Simulates nPaths equidistant GBM paths of nSteps steps and returns them
// wrapped in a PathCollector.
std::shared_ptr<path_collector::PathCollector<1, double>> const
fdm_engine_cuda::GBMPathEngineDouble::simulate(unsigned int nPaths, unsigned int nSteps,
    double dt, FDMScheme scheme, GPUConfiguration config) const {
    double *d_paths = NULL;
    hiprandState_t *states; // RNG state for each thread
    // Allocate memory for the paths
    hipMalloc(&d_paths, nPaths * nSteps * sizeof(double));
    // Allocate memory for RNG states
    hipMalloc(&states, nPaths * sizeof(hiprandState_t));

    switch (config) {
    case GPUConfiguration::Grid1D: { _generate1D(d_paths, states, scheme, nPaths, nSteps, dt); } break;
    case GPUConfiguration::Grid2D: { _generate2D(d_paths, states, scheme, nPaths, nSteps, dt); } break;
    case GPUConfiguration::Grid3D: { _generate3D(d_paths, states, scheme, nPaths, nSteps, dt); } break;
    default: { _generate1D(d_paths, states, scheme, nPaths, nSteps, dt); } break;
    }

    // Copy from device to host (hipMemcpy blocks, so no explicit sync needed):
    double *h_paths = (double *)malloc(nPaths * nSteps * sizeof(double));
    hipMemcpy(h_paths, d_paths, nPaths * nSteps * sizeof(double),
        hipMemcpyKind::hipMemcpyDeviceToHost);

    // Deallocate memory blocks on device:
    hipFree(d_paths);
    hipFree(states);

    // NOTE(review): h_paths comes from malloc() but std::unique_ptr<double>
    // releases with scalar delete — allocator/deleter mismatch (UB).  A real
    // fix requires PathCollector to take unique_ptr<double[]> or a
    // free()-based deleter — TODO confirm against path_collector.h.
    std::unique_ptr<double> uptr(h_paths);
    return std::shared_ptr<path_collector::PathCollector<1, double>>
        (new path_collector::PathCollector<1, double>{ std::move(uptr), nPaths, nSteps });
}

//=====================================================================
//====== non-equidistant overloads
//=====================================================================

// Simulates nPaths GBM paths over the caller-supplied time points.
std::shared_ptr<path_collector::PathCollector<1, double>> const
fdm_engine_cuda::GBMPathEngineDouble::simulate(unsigned int nPaths,
    TimePointsType<double> const &timePoints, FDMScheme scheme, GPUConfiguration config) const {
    unsigned int size = timePoints.size();
    // Stage the time points in a flat host buffer:
    double *h_times = (double *)malloc(size * sizeof(double));
    for (std::size_t t = 0; t < size; ++t) {
        h_times[t] = timePoints.at(t);
    }

    double *d_paths = NULL;
    double *d_times = NULL;
    hiprandState_t *states; // RNG state for each thread
    // Allocate memory for the paths
    hipMalloc(&d_paths, nPaths * size * sizeof(double));
    // Allocate memory for the times
    hipMalloc(&d_times, size * sizeof(double));
    // Allocate memory for RNG states
    hipMalloc(&states, nPaths * sizeof(hiprandState_t));
    // Copy h_times to d_times, i.e. from host to device:
    hipMemcpy(d_times, h_times, size * sizeof(double), hipMemcpyKind::hipMemcpyHostToDevice);

    switch (config) {
    case GPUConfiguration::Grid1D: { _generate1D(d_paths, states, scheme, nPaths, d_times, size); } break;
    case GPUConfiguration::Grid2D: { _generate2D(d_paths, states, scheme, nPaths, d_times, size); } break;
    case GPUConfiguration::Grid3D: { _generate3D(d_paths, states, scheme, nPaths, d_times, size); } break;
    default: { _generate1D(d_paths, states, scheme, nPaths, d_times, size); } break;
    }

    // Copy from device to host:
    double *h_paths = (double *)malloc(nPaths * size * sizeof(double));
    hipMemcpy(h_paths, d_paths, nPaths * size * sizeof(double),
        hipMemcpyKind::hipMemcpyDeviceToHost);

    free(h_times);
    hipFree(d_paths);
    hipFree(d_times);
    hipFree(states);

    // NOTE(review): same malloc()/delete deleter mismatch as the
    // equidistant overload above — TODO confirm against path_collector.h.
    std::unique_ptr<double> uptr(h_paths);
    return std::shared_ptr<path_collector::PathCollector<1, double>>
        (new path_collector::PathCollector<1, double>{ std::move(uptr), nPaths, size });
}

//=====================================================================
//====== equidistant overloads
//=====================================================================

// Float flavour of the engine: equidistant paths on a 1D grid.
void fdm_engine_cuda::GBMPathEngineFloat::_generate1D(float *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, float dt) const {
    // initialise RNG states (grid/block order fixed — see note above)
    const unsigned int threadsPerBlock = THREADS_PER_BLOCK;
    unsigned int blocksPerGrid = (nPaths + threadsPerBlock - 1) / threadsPerBlock;
    random_kernel_initializers::initialiseRandomKernel1D<<<blocksPerGrid, threadsPerBlock>>>(
        time(0), states, nPaths);
    switch (scheme) {
    case
FDMScheme::EulerScheme:
    {
        // NOTE(review): these 1D launches originally read
        // <<<threadsPerBlock, blocksPerGrid>>>; the launch syntax is
        // <<<gridDim, blockDim>>>, so the arguments were swapped (blockDim
        // would exceed the 1024-thread limit for large nPaths).  Fixed.
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel1D<>
            <<<blocksPerGrid, threadsPerBlock>>>(this->gbm_, d_paths, states, nPaths, nSteps, dt);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel1D<>
            <<<blocksPerGrid, threadsPerBlock>>>(this->gbm_, d_paths, states, nPaths, nSteps, dt);
    }
    break;
    }
}

// Equidistant float paths on a 2D grid; nPaths must be a multiple of 1000.
void fdm_engine_cuda::GBMPathEngineFloat::_generate2D(float *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, float dt) const {
    const unsigned int widthSize{ 1000 };
    assert((nPaths % widthSize) == 0);
    unsigned int heightSize{ nPaths / widthSize };

    const unsigned int threadsPerBlockX = THREADS_2D_PER_BLOCK_X;
    const unsigned int threadsPerBlockY = THREADS_2D_PER_BLOCK_Y;
    unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX;
    unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY;
    const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY);
    const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY);

    random_kernel_initializers::initialiseRandomKernel2D<<<gridSize, blockSize>>>(
        time(0), states, widthSize, heightSize);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel2D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, nSteps, dt);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel2D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, nSteps, dt);
    }
    break;
    }
}

// Equidistant float paths on a 3D grid; nPaths must be a multiple of 100 * 100.
void fdm_engine_cuda::GBMPathEngineFloat::_generate3D(float *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, float dt) const {
    const unsigned int widthSize{ 100 };
    const unsigned int heightSize{ 100 };
    assert((nPaths % (widthSize * heightSize)) == 0);
    unsigned int depthSize{ nPaths / (widthSize * heightSize) };

    const unsigned int threadsPerBlockX = THREADS_3D_PER_BLOCK_X;
    const unsigned int threadsPerBlockY = THREADS_3D_PER_BLOCK_Y;
    const unsigned int threadsPerBlockZ = THREADS_3D_PER_BLOCK_Z;
    unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX;
    unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY;
    unsigned int blocksPerGridZ = (depthSize + threadsPerBlockZ - 1) / threadsPerBlockZ;
    const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY, threadsPerBlockZ);
    const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY, blocksPerGridZ);

    random_kernel_initializers::initialiseRandomKernel3D<<<gridSize, blockSize>>>(
        time(0), states, widthSize, heightSize, depthSize);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel3D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, depthSize, nSteps, dt);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel3D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, depthSize, nSteps, dt);
    }
    break;
    }
}

//=====================================================================
//====== non-equidistant overloads
//=====================================================================

// Non-equidistant float paths on a 1D grid; time points read from d_times.
void fdm_engine_cuda::GBMPathEngineFloat::_generate1D(float *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, float const *d_times, unsigned int size) const {
    // initialise RNG states (grid/block order fixed — see note above)
    const unsigned int threadsPerBlock = THREADS_PER_BLOCK;
    unsigned int blocksPerGrid = (nPaths + threadsPerBlock - 1) / threadsPerBlock;
    random_kernel_initializers::initialiseRandomKernel1D<<<blocksPerGrid, threadsPerBlock>>>(
        time(0), states, nPaths);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel1D<>
            <<<blocksPerGrid, threadsPerBlock>>>(this->gbm_, d_paths, states, nPaths, d_times, size);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel1D<>
            <<<blocksPerGrid, threadsPerBlock>>>(this->gbm_, d_paths, states, nPaths, d_times, size);
    }
    break;
    }
}

// Non-equidistant float paths on a 2D grid; nPaths must be a multiple of 1000.
void fdm_engine_cuda::GBMPathEngineFloat::_generate2D(float *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, float const *d_times, unsigned int size) const {
    const unsigned int widthSize{ 1000 };
    assert((nPaths % widthSize) == 0);
    unsigned int heightSize{ nPaths / widthSize };

    const unsigned int threadsPerBlockX = THREADS_2D_PER_BLOCK_X;
    const unsigned int threadsPerBlockY = THREADS_2D_PER_BLOCK_Y;
    unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX;
    unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY;
    const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY);
    const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY);

    random_kernel_initializers::initialiseRandomKernel2D<<<gridSize, blockSize>>>(
        time(0), states, widthSize, heightSize);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel2D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, d_times, size);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel2D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, d_times, size);
    }
    break;
    }
}

// Non-equidistant float paths on a 3D grid; nPaths must be a multiple of 100 * 100.
void fdm_engine_cuda::GBMPathEngineFloat::_generate3D(float *d_paths, hiprandState_t *states,
    FDMScheme scheme, unsigned int nPaths, float const *d_times, unsigned int size) const {
    const unsigned int widthSize{ 100 };
    const unsigned int heightSize{ 100 };
    assert((nPaths % (widthSize * heightSize)) == 0);
    unsigned int depthSize{ nPaths / (widthSize * heightSize) };

    const unsigned int threadsPerBlockX = THREADS_3D_PER_BLOCK_X;
    const unsigned int threadsPerBlockY = THREADS_3D_PER_BLOCK_Y;
    const unsigned int threadsPerBlockZ = THREADS_3D_PER_BLOCK_Z;
    unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX;
    unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY;
    unsigned int blocksPerGridZ = (depthSize + threadsPerBlockZ - 1) / threadsPerBlockZ;
    const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY, threadsPerBlockZ);
    const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY, blocksPerGridZ);

    random_kernel_initializers::initialiseRandomKernel3D<<<gridSize, blockSize>>>(
        time(0), states, widthSize, heightSize, depthSize);
    switch (scheme) {
    case FDMScheme::EulerScheme:
    {
        kernels::one_factor_kernels::euler_scheme::generatePathsKernel3D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, depthSize, d_times, size);
    }
    break;
    case FDMScheme::MilsteinScheme:
    {
        kernels::one_factor_kernels::milstein_scheme::generatePathsKernel3D<>
            <<<gridSize, blockSize>>>(this->gbm_, d_paths, states,
                widthSize, heightSize, depthSize, d_times, size);
    }
    break;
    }
}

//=====================================================================
//====== equidistant overloads
//=====================================================================

// Simulates nPaths equidistant float GBM paths of nSteps steps.
std::shared_ptr<path_collector::PathCollector<1, float>> const
fdm_engine_cuda::GBMPathEngineFloat::simulate(unsigned int nPaths, unsigned int nSteps,
    float dt, FDMScheme scheme, GPUConfiguration config) const {
    float *d_paths = NULL;
    hiprandState_t *states; // RNG state for each thread
    // Allocate memory for the paths
    hipMalloc(&d_paths, nPaths * nSteps * sizeof(float));
    // Allocate memory for RNG states
    hipMalloc(&states, nPaths * sizeof(hiprandState_t));

    switch (config) {
    case GPUConfiguration::Grid1D: { _generate1D(d_paths, states, scheme, nPaths, nSteps, dt); } break;
    case GPUConfiguration::Grid2D: { _generate2D(d_paths, states, scheme, nPaths, nSteps, dt); } break;
    case GPUConfiguration::Grid3D: { _generate3D(d_paths, states, scheme, nPaths, nSteps, dt); } break;
    default: { _generate1D(d_paths, states, scheme, nPaths, nSteps, dt); } break;
    }

    // Copy from device to host (hipMemcpy blocks, so no explicit sync needed):
    float *h_paths = (float *)malloc(nPaths * nSteps * sizeof(float));
    hipMemcpy(h_paths, d_paths, nPaths * nSteps * sizeof(float),
        hipMemcpyKind::hipMemcpyDeviceToHost);

    hipFree(d_paths);
    hipFree(states);

    // NOTE(review): h_paths comes from malloc() but std::unique_ptr<float>
    // releases with scalar delete — allocator/deleter mismatch (UB).  Fixing
    // this requires changing PathCollector's pointer type — TODO confirm.
    std::unique_ptr<float> uptr(h_paths);
    return std::shared_ptr<path_collector::PathCollector<1, float>>
        (new path_collector::PathCollector<1, float>{ std::move(uptr), nPaths, nSteps });
}

//=====================================================================
//====== non-equidistant overloads
//=====================================================================

// Simulates nPaths float GBM paths over the caller-supplied time points.
std::shared_ptr<path_collector::PathCollector<1, float>> const
fdm_engine_cuda::GBMPathEngineFloat::simulate(unsigned int nPaths,
    TimePointsType<float> const &timePoints, FDMScheme scheme, GPUConfiguration config) const {
    unsigned int size = timePoints.size();
    // Stage the time points in a flat host buffer:
    float *h_times = (float *)malloc(size * sizeof(float));
    for (std::size_t t = 0; t < size; ++t) {
        h_times[t] = timePoints.at(t);
    }

    float *d_paths = NULL;
    float *d_times = NULL;
    hiprandState_t *states; // RNG state for each thread
    // Allocate memory for the paths
    hipMalloc(&d_paths, nPaths * size * sizeof(float));
    // Allocate memory for the times
    hipMalloc(&d_times, size * sizeof(float));
    // Allocate memory for RNG
states hipMalloc(&states, nPaths * sizeof(hiprandState_t)); // Copy h_times to d_times,i.e. from hosdt to device: hipMemcpy(d_times, h_times, size * sizeof(float), hipMemcpyKind::hipMemcpyHostToDevice); switch (config) { case GPUConfiguration::Grid1D: { _generate1D(d_paths, states, scheme, nPaths, d_times, size); } break; case GPUConfiguration::Grid2D: { _generate2D(d_paths, states, scheme, nPaths, d_times, size); } break; case GPUConfiguration::Grid3D: { _generate3D(d_paths, states, scheme, nPaths, d_times, size); } break; default: { _generate1D(d_paths, states, scheme, nPaths, d_times, size); } break; } // Allocate memory on the host: float *h_paths = (float *)malloc(nPaths*size * sizeof(float)); // Copy from device to host: hipMemcpy(h_paths, d_paths, nPaths*size * sizeof(float), hipMemcpyKind::hipMemcpyDeviceToHost); //std::vector<std::vector<float>> paths(nPaths); //for (std::size_t s = 0; s < paths.size(); ++s) { // std::vector<float> path(size); // for (std::size_t p = 0; p < path.size(); ++p) { // path[p] = std::move(h_paths[s + paths.size()*p]); // } // paths[s] = std::move(path); //} //free(h_paths); free(h_times); hipFree(d_paths); hipFree(d_times); hipFree(states); // wrapp the raw pointer into unique_ptr: std::unique_ptr<float> uptr(h_paths); return std::shared_ptr<path_collector::PathCollector<1, float>> (new path_collector::PathCollector<1, float>{ std::move(uptr),nPaths,size }); } }
d86e3f3a20cb63d374a5073228db4bb60ddf6df8.cu
#include<cuda_runtime.h> #include<curand.h> #include<curand_kernel.h> #include<device_launch_parameters.h> #include"sde_builder_cuda.h" #include<cassert> #include"random_kernel_initializers.cuh" #include"mc_types.h" #include"path_collector.h" #include"one_factor_kernels.h" namespace fdm_engine_cuda { using mc_types::FDMScheme; using mc_types::GPUConfiguration; using mc_types::PathValuesType; //===================================================================== //====== equidistant overloads //===================================================================== void fdm_engine_cuda::GBMPathEngineDouble::_generate1D(double *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, double dt) const { // initialise RNG states const unsigned int threadsPerBlock = THREADS_PER_BLOCK; unsigned int blocksPerGrid = (nPaths + threadsPerBlock - 1) / threadsPerBlock; random_kernel_initializers::initialiseRandomKernel1D << <threadsPerBlock, blocksPerGrid >> > (time(0), states, nPaths); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme:: generatePathsKernel1D<> <<<threadsPerBlock, blocksPerGrid >>>(this->gbm_, d_paths, states, nPaths, nSteps, dt); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme:: generatePathsKernel1D<><< <threadsPerBlock, blocksPerGrid >> > (this->gbm_, d_paths, states, nPaths, nSteps, dt); } break; } } void fdm_engine_cuda::GBMPathEngineDouble::_generate2D(double *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, double dt)const { const unsigned int widthSize{ 1000 }; assert((nPaths%widthSize) == 0); unsigned int heightSize{ nPaths / widthSize }; const unsigned int threadsPerBlockX = THREADS_2D_PER_BLOCK_X; const unsigned int threadsPerBlockY = THREADS_2D_PER_BLOCK_Y; unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX; unsigned int blocksPerGridY = (heightSize + 
threadsPerBlockY - 1) / threadsPerBlockY; const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY); const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY); random_kernel_initializers::initialiseRandomKernel2D << <gridSize, blockSize >> > (time(0), states, widthSize, heightSize); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel2D<><<<gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, nSteps, dt); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel2D<><< <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, nSteps, dt); } break; } } void fdm_engine_cuda::GBMPathEngineDouble::_generate3D(double *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, double dt)const { const unsigned int widthSize{ 100 }; const unsigned int heightSize{ 100 }; assert((nPaths % (widthSize*heightSize)) == 0); unsigned int depthSize{ nPaths / (widthSize*heightSize) }; const unsigned int threadsPerBlockX = THREADS_3D_PER_BLOCK_X; const unsigned int threadsPerBlockY = THREADS_3D_PER_BLOCK_Y; const unsigned int threadsPerBlockZ = THREADS_3D_PER_BLOCK_Z; unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX; unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY; unsigned int blocksPerGridZ = (depthSize + threadsPerBlockZ - 1) / threadsPerBlockZ; const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY, threadsPerBlockZ); const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY, blocksPerGridZ); random_kernel_initializers::initialiseRandomKernel3D << <gridSize, blockSize >> > (time(0), states, widthSize, heightSize, depthSize); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel3D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, 
heightSize, depthSize, nSteps, dt); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel3D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, depthSize, nSteps, dt); } break; } } //===================================================================== //====== non-equidistant overloads //===================================================================== void fdm_engine_cuda::GBMPathEngineDouble::_generate1D(double *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, double const *d_times, unsigned int size) const { // initialise RNG states const unsigned int threadsPerBlock = THREADS_PER_BLOCK; unsigned int blocksPerGrid = (nPaths + threadsPerBlock - 1) / threadsPerBlock; random_kernel_initializers::initialiseRandomKernel1D << <threadsPerBlock, blocksPerGrid >> > (time(0), states, nPaths); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel1D<> << <threadsPerBlock, blocksPerGrid >> > (this->gbm_, d_paths, states, nPaths, d_times, size); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel1D<> << <threadsPerBlock, blocksPerGrid >> > (this->gbm_, d_paths, states, nPaths, d_times, size); } break; } } void fdm_engine_cuda::GBMPathEngineDouble::_generate2D(double *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, double const *d_times, unsigned int size)const { const unsigned int widthSize{ 1000 }; assert((nPaths%widthSize) == 0); unsigned int heightSize{ nPaths / widthSize }; const unsigned int threadsPerBlockX = THREADS_2D_PER_BLOCK_X; const unsigned int threadsPerBlockY = THREADS_2D_PER_BLOCK_Y; unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX; unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY; const dim3 blockSize = dim3(threadsPerBlockX, 
threadsPerBlockY); const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY); random_kernel_initializers::initialiseRandomKernel2D << <gridSize, blockSize >> > (time(0), states, widthSize, heightSize); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel2D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, d_times, size); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel2D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, d_times, size); } break; } } void fdm_engine_cuda::GBMPathEngineDouble::_generate3D(double *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, double const *d_times, unsigned int size)const { const unsigned int widthSize{ 100 }; const unsigned int heightSize{ 100 }; assert((nPaths % (widthSize*heightSize)) == 0); unsigned int depthSize{ nPaths / (widthSize*heightSize) }; const unsigned int threadsPerBlockX = THREADS_3D_PER_BLOCK_X; const unsigned int threadsPerBlockY = THREADS_3D_PER_BLOCK_Y; const unsigned int threadsPerBlockZ = THREADS_3D_PER_BLOCK_Z; unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX; unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY; unsigned int blocksPerGridZ = (depthSize + threadsPerBlockZ - 1) / threadsPerBlockZ; const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY, threadsPerBlockZ); const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY, blocksPerGridZ); random_kernel_initializers::initialiseRandomKernel3D << <gridSize, blockSize >> > (time(0), states, widthSize, heightSize, depthSize); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel3D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, depthSize, d_times, size); } break; case 
FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel3D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, depthSize, d_times, size); } break; } } //===================================================================== //====== equidistant overloads //===================================================================== //fdm_engine_cuda::PathValuesType<fdm_engine_cuda::PathValuesType<double>> std::shared_ptr<path_collector::PathCollector<1, double>> const fdm_engine_cuda::GBMPathEngineDouble::simulate(unsigned int nPaths, unsigned int nSteps, double dt, FDMScheme scheme, GPUConfiguration config)const { double *d_paths = NULL; curandState_t *states; // RNG state for each thread // Allocate memory for the paths cudaMalloc(&d_paths, nPaths * nSteps * sizeof(double)); // Allocate memory for RNG states cudaMalloc(&states, nPaths * sizeof(curandState_t)); switch (config) { case GPUConfiguration::Grid1D: { _generate1D(d_paths, states, scheme, nPaths, nSteps, dt); } break; case GPUConfiguration::Grid2D: { _generate2D(d_paths, states, scheme, nPaths, nSteps, dt); } break; case GPUConfiguration::Grid3D: { _generate3D(d_paths, states, scheme, nPaths, nSteps, dt); } break; default: { _generate1D(d_paths, states, scheme, nPaths, nSteps, dt); } break; } // Allocate memory on the host: double *h_paths = (double *)malloc(nPaths*nSteps * sizeof(double)); // Copy from device to host: cudaMemcpy(h_paths, d_paths, nPaths*nSteps * sizeof(double), cudaMemcpyKind::cudaMemcpyDeviceToHost); //std::vector<std::vector<double>> paths(nPaths); //for (std::size_t s = 0; s < paths.size(); ++s) { // std::vector<double> path(nSteps); // for (std::size_t p = 0; p < path.size(); ++p) { // path[p] = std::move(h_paths[s + paths.size()*p]); // } // paths[s] = std::move(path); //} //free(h_paths); // Deallocate memory blocks on device: cudaFree(d_paths); cudaFree(states); // wrapp the raw pointer into unique_ptr: 
std::unique_ptr<double> uptr(h_paths); return std::shared_ptr<path_collector::PathCollector<1, double>> (new path_collector::PathCollector<1, double>{ std::move(uptr),nPaths,nSteps }); } //===================================================================== //====== non-equidistant overloads //===================================================================== //fdm_engine_cuda::PathValuesType<fdm_engine_cuda::PathValuesType<double>> std::shared_ptr<path_collector::PathCollector<1, double>> const fdm_engine_cuda::GBMPathEngineDouble::simulate(unsigned int nPaths, TimePointsType<double> const &timePoints, FDMScheme scheme, GPUConfiguration config)const { unsigned int size = timePoints.size(); // Allocate memory on the host: double *h_times = (double*)malloc(size * sizeof(double)); // Copy: for (std::size_t t = 0; t < size;++t) { h_times[t] = timePoints.at(t); } double *d_paths = NULL; double *d_times = NULL; // RNG state for each thread curandState_t *states; // Allocate memory for the paths cudaMalloc(&d_paths, nPaths * size * sizeof(double)); // Allocate memory for the times cudaMalloc(&d_times, size * sizeof(double)); // Allocate memory for RNG states cudaMalloc(&states, nPaths * sizeof(curandState_t)); // Copy h_times to d_times,i.e. 
from hosdt to device: cudaMemcpy(d_times, h_times, size*sizeof(double), cudaMemcpyKind::cudaMemcpyHostToDevice); switch (config) { case GPUConfiguration::Grid1D: { _generate1D(d_paths, states, scheme, nPaths, d_times, size); } break; case GPUConfiguration::Grid2D: { _generate2D(d_paths, states, scheme, nPaths, d_times, size); } break; case GPUConfiguration::Grid3D: { _generate3D(d_paths, states, scheme, nPaths, d_times, size); } break; default: { _generate1D(d_paths, states, scheme, nPaths, d_times, size); } break; } // Allocate memory on the host: double *h_paths = (double *)malloc(nPaths*size * sizeof(double)); // Copy from device to host: cudaMemcpy(h_paths, d_paths, nPaths*size * sizeof(double), cudaMemcpyKind::cudaMemcpyDeviceToHost); //std::vector<std::vector<double>> paths(nPaths); //for (std::size_t s = 0; s < paths.size(); ++s) { // std::vector<double> path(size); // for (std::size_t p = 0; p < path.size(); ++p) { // path[p] = std::move(h_paths[s + paths.size()*p]); // } // paths[s] = std::move(path); //} //free(h_paths); free(h_times); cudaFree(d_paths); cudaFree(d_times); cudaFree(states); // wrapp the raw pointer into unique_ptr: std::unique_ptr<double> uptr(h_paths); return std::shared_ptr<path_collector::PathCollector<1, double>> (new path_collector::PathCollector<1, double>{ std::move(uptr),nPaths,size }); } //===================================================================== //====== equidistant overloads //===================================================================== void fdm_engine_cuda::GBMPathEngineFloat::_generate1D(float *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, float dt) const { // initialise RNG states const unsigned int threadsPerBlock = THREADS_PER_BLOCK; unsigned int blocksPerGrid = (nPaths + threadsPerBlock - 1) / threadsPerBlock; random_kernel_initializers::initialiseRandomKernel1D << <threadsPerBlock, blocksPerGrid >> > (time(0), states, nPaths); switch (scheme) { case 
FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel1D<> << <threadsPerBlock, blocksPerGrid >> > (this->gbm_, d_paths, states, nPaths, nSteps, dt); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel1D<><< <threadsPerBlock, blocksPerGrid >> > (this->gbm_, d_paths, states, nPaths, nSteps, dt); } break; } } void fdm_engine_cuda::GBMPathEngineFloat::_generate2D(float *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, float dt)const { const unsigned int widthSize{ 1000 }; assert((nPaths%widthSize) == 0); unsigned int heightSize{ nPaths / widthSize }; const unsigned int threadsPerBlockX = THREADS_2D_PER_BLOCK_X; const unsigned int threadsPerBlockY = THREADS_2D_PER_BLOCK_Y; unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX; unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY; const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY); const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY); random_kernel_initializers::initialiseRandomKernel2D << <gridSize, blockSize >> > (time(0), states, widthSize, heightSize); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel2D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, nSteps, dt); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel2D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, nSteps, dt); } break; } } void fdm_engine_cuda::GBMPathEngineFloat::_generate3D(float *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, unsigned int nSteps, float dt)const { const unsigned int widthSize{ 100 }; const unsigned int heightSize{ 100 }; assert((nPaths % (widthSize*heightSize)) == 0); unsigned int depthSize{ nPaths / 
(widthSize*heightSize) }; const unsigned int threadsPerBlockX = THREADS_3D_PER_BLOCK_X; const unsigned int threadsPerBlockY = THREADS_3D_PER_BLOCK_Y; const unsigned int threadsPerBlockZ = THREADS_3D_PER_BLOCK_Z; unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX; unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY; unsigned int blocksPerGridZ = (depthSize + threadsPerBlockZ - 1) / threadsPerBlockZ; const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY, threadsPerBlockZ); const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY, blocksPerGridZ); random_kernel_initializers::initialiseRandomKernel3D << <gridSize, blockSize >> > (time(0), states, widthSize, heightSize, depthSize); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel3D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, depthSize, nSteps, dt); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel3D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, depthSize, nSteps, dt); } break; } } //===================================================================== //====== non-equidistant overloads //===================================================================== void fdm_engine_cuda::GBMPathEngineFloat::_generate1D(float *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, float const *d_times, unsigned int size) const { // initialise RNG states const unsigned int threadsPerBlock = THREADS_PER_BLOCK; unsigned int blocksPerGrid = (nPaths + threadsPerBlock - 1) / threadsPerBlock; random_kernel_initializers::initialiseRandomKernel1D << <threadsPerBlock, blocksPerGrid >> > (time(0), states, nPaths); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel1D<> << <threadsPerBlock, 
blocksPerGrid >> > (this->gbm_, d_paths, states, nPaths, d_times, size); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel1D<> << <threadsPerBlock, blocksPerGrid >> > (this->gbm_, d_paths, states, nPaths, d_times, size); } break; } } void fdm_engine_cuda::GBMPathEngineFloat::_generate2D(float *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, float const *d_times, unsigned int size)const { const unsigned int widthSize{ 1000 }; assert((nPaths%widthSize) == 0); unsigned int heightSize{ nPaths / widthSize }; const unsigned int threadsPerBlockX = THREADS_2D_PER_BLOCK_X; const unsigned int threadsPerBlockY = THREADS_2D_PER_BLOCK_Y; unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX; unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY; const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY); const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY); random_kernel_initializers::initialiseRandomKernel2D << <gridSize, blockSize >> > (time(0), states, widthSize, heightSize); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel2D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, d_times, size); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel2D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, d_times, size); } break; } } void fdm_engine_cuda::GBMPathEngineFloat::_generate3D(float *d_paths, curandState_t *states, FDMScheme scheme, unsigned int nPaths, float const *d_times, unsigned int size)const { const unsigned int widthSize{ 100 }; const unsigned int heightSize{ 100 }; assert((nPaths % (widthSize*heightSize)) == 0); unsigned int depthSize{ nPaths / (widthSize*heightSize) }; const unsigned int threadsPerBlockX = THREADS_3D_PER_BLOCK_X; 
const unsigned int threadsPerBlockY = THREADS_3D_PER_BLOCK_Y; const unsigned int threadsPerBlockZ = THREADS_3D_PER_BLOCK_Z; unsigned int blocksPerGridX = (widthSize + threadsPerBlockX - 1) / threadsPerBlockX; unsigned int blocksPerGridY = (heightSize + threadsPerBlockY - 1) / threadsPerBlockY; unsigned int blocksPerGridZ = (depthSize + threadsPerBlockZ - 1) / threadsPerBlockZ; const dim3 blockSize = dim3(threadsPerBlockX, threadsPerBlockY, threadsPerBlockZ); const dim3 gridSize = dim3(blocksPerGridX, blocksPerGridY, blocksPerGridZ); random_kernel_initializers::initialiseRandomKernel3D << <gridSize, blockSize >> > (time(0), states, widthSize, heightSize, depthSize); switch (scheme) { case FDMScheme::EulerScheme: { kernels::one_factor_kernels::euler_scheme::generatePathsKernel3D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, depthSize, d_times, size); } break; case FDMScheme::MilsteinScheme: { kernels::one_factor_kernels::milstein_scheme::generatePathsKernel3D<> << <gridSize, blockSize >> > (this->gbm_, d_paths, states, widthSize, heightSize, depthSize, d_times, size); } break; } } //===================================================================== //====== equidistant overloads //===================================================================== //fdm_engine_cuda::PathValuesType<fdm_engine_cuda::PathValuesType<float>> std::shared_ptr<path_collector::PathCollector<1, float>> const fdm_engine_cuda::GBMPathEngineFloat::simulate(unsigned int nPaths, unsigned int nSteps, float dt, FDMScheme scheme, GPUConfiguration config)const { float *d_paths = NULL; curandState_t *states; // RNG state for each thread // Allocate memory for the paths cudaMalloc(&d_paths, nPaths * nSteps * sizeof(float)); // Allocate memory for RNG states cudaMalloc(&states, nPaths * sizeof(curandState_t)); switch (config) { case GPUConfiguration::Grid1D: { _generate1D(d_paths, states, scheme, nPaths, nSteps, dt); } break; case GPUConfiguration::Grid2D: { 
_generate2D(d_paths, states, scheme, nPaths, nSteps, dt); } break; case GPUConfiguration::Grid3D: { _generate3D(d_paths, states, scheme, nPaths, nSteps, dt); } break; default: { _generate1D(d_paths, states, scheme, nPaths, nSteps, dt); } break; } // Allocate memory on the host: float *h_paths = (float *)malloc(nPaths*nSteps * sizeof(float)); // Copy from device to host: cudaMemcpy(h_paths, d_paths, nPaths*nSteps * sizeof(float), cudaMemcpyKind::cudaMemcpyDeviceToHost); //std::vector<std::vector<float>> paths(nPaths); //for (std::size_t s = 0; s < paths.size(); ++s) { // std::vector<float> path(nSteps); // for (std::size_t p = 0; p < path.size(); ++p) { // path[p] = std::move(h_paths[s + paths.size()*p]); // } // paths[s] = std::move(path); //} //free(h_paths); cudaFree(d_paths); cudaFree(states); // wrapp the raw pointer into unique_ptr: std::unique_ptr<float> uptr(h_paths); return std::shared_ptr<path_collector::PathCollector<1, float>> (new path_collector::PathCollector<1, float>{ std::move(uptr),nPaths,nSteps }); } //===================================================================== //====== non-equidistant overloads //===================================================================== //fdm_engine_cuda::PathValuesType<fdm_engine_cuda::PathValuesType<float>> std::shared_ptr<path_collector::PathCollector<1, float>> const fdm_engine_cuda::GBMPathEngineFloat::simulate(unsigned int nPaths, TimePointsType<float> const &timePoints, FDMScheme scheme, GPUConfiguration config)const { unsigned int size = timePoints.size(); // Allocate memory on the host: float *h_times = (float*)malloc(size * sizeof(float)); // Copy: for (std::size_t t = 0; t < size; ++t) { h_times[t] = timePoints.at(t); } float *d_paths = NULL; float *d_times = NULL; // RNG state for each thread curandState_t *states; // Allocate memory for the paths cudaMalloc(&d_paths, nPaths * size * sizeof(float)); // Allocate memory for the times cudaMalloc(&d_times, size * sizeof(float)); // Allocate memory 
for RNG states cudaMalloc(&states, nPaths * sizeof(curandState_t)); // Copy h_times to d_times,i.e. from hosdt to device: cudaMemcpy(d_times, h_times, size * sizeof(float), cudaMemcpyKind::cudaMemcpyHostToDevice); switch (config) { case GPUConfiguration::Grid1D: { _generate1D(d_paths, states, scheme, nPaths, d_times, size); } break; case GPUConfiguration::Grid2D: { _generate2D(d_paths, states, scheme, nPaths, d_times, size); } break; case GPUConfiguration::Grid3D: { _generate3D(d_paths, states, scheme, nPaths, d_times, size); } break; default: { _generate1D(d_paths, states, scheme, nPaths, d_times, size); } break; } // Allocate memory on the host: float *h_paths = (float *)malloc(nPaths*size * sizeof(float)); // Copy from device to host: cudaMemcpy(h_paths, d_paths, nPaths*size * sizeof(float), cudaMemcpyKind::cudaMemcpyDeviceToHost); //std::vector<std::vector<float>> paths(nPaths); //for (std::size_t s = 0; s < paths.size(); ++s) { // std::vector<float> path(size); // for (std::size_t p = 0; p < path.size(); ++p) { // path[p] = std::move(h_paths[s + paths.size()*p]); // } // paths[s] = std::move(path); //} //free(h_paths); free(h_times); cudaFree(d_paths); cudaFree(d_times); cudaFree(states); // wrapp the raw pointer into unique_ptr: std::unique_ptr<float> uptr(h_paths); return std::shared_ptr<path_collector::PathCollector<1, float>> (new path_collector::PathCollector<1, float>{ std::move(uptr),nPaths,size }); } }
fa8bf97c80be13dc10be949368676aa2f2503e86.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __global__ void mem_trs_test(int * input){ int gid = blockIdx.x * blockDim.x + threadIdx.x; printf("tid = %d, gid = %d, value = %d\n", threadIdx.x, gid, input[gid]); } __global__ void mem_trs_test2(int * input, int size){ int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < size) { printf("tid = %d, gid = %d, value = %d\n", threadIdx.x, gid, input[gid]); } } int main(int argc, char ** argv) { int size = 150; int byte_size = size * sizeof(int); int *h_input; h_input = (int *)malloc(byte_size); time_t t; srand((unsigned)time(&t)); for (int i=0; i<size; i++) { h_input[i] = (int) (rand() & 0xff); } int * d_input; hipMalloc((void**)&d_input, byte_size); hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice); dim3 block(32); dim3 grid(5); hipLaunchKernelGGL(( mem_trs_test2) , dim3(grid), dim3(block), 0, 0, d_input, size); hipDeviceSynchronize(); hipDeviceReset(); return 0; }
fa8bf97c80be13dc10be949368676aa2f2503e86.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void mem_trs_test(int * input){ int gid = blockIdx.x * blockDim.x + threadIdx.x; printf("tid = %d, gid = %d, value = %d\n", threadIdx.x, gid, input[gid]); } __global__ void mem_trs_test2(int * input, int size){ int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < size) { printf("tid = %d, gid = %d, value = %d\n", threadIdx.x, gid, input[gid]); } } int main(int argc, char ** argv) { int size = 150; int byte_size = size * sizeof(int); int *h_input; h_input = (int *)malloc(byte_size); time_t t; srand((unsigned)time(&t)); for (int i=0; i<size; i++) { h_input[i] = (int) (rand() & 0xff); } int * d_input; cudaMalloc((void**)&d_input, byte_size); cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice); dim3 block(32); dim3 grid(5); mem_trs_test2 <<<grid, block>>>(d_input, size); cudaDeviceSynchronize(); cudaDeviceReset(); return 0; }
a00559fa2423b3fb5241d0a4b75fa32849bbb88b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/native/TensorTransformations.h> // flip #include <type_traits> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/core/Array.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/cub.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/KernelUtils.cuh> #include <c10/util/MaybeOwned.h> #include <THH/THHTensorInfo.cuh> namespace at { namespace native { static constexpr int launch_bound2 = 4; static constexpr int launch_size_nd = 128; template<int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, launch_bound2) __global__ void index_elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( index_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename func_t> void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) { int num_indices = index_size.size(); AT_ASSERT(num_indices == index_stride.size()); AT_ASSERT(num_indices == iter.ntensors() - 2); if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : 
iter.with_32bit_indexing()) { gpu_index_kernel(sub_iter, index_size, index_stride, f); } return; } auto sizes = at::detail::Array<int64_t, 25>(0); auto strides = at::detail::Array<int64_t, 25>(0); auto index_ptrs = at::detail::Array<char*, 25>(nullptr); for (int i = 0; i < num_indices; i++) { sizes[i] = index_size[i]; strides[i] = index_stride[i]; index_ptrs[i] = (char*)iter.data_ptr(i + 2); } char* out_ptr = (char*)iter.data_ptr(0); char* in_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<3>(iter); launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) { auto offsets = offset_calc.get(idx); char* out_data = out_ptr + offsets[0]; char* in_data = in_ptr + offsets[1]; int64_t offset = 0; #pragma unroll for (int i = 0; i < num_indices; i++) { int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]); CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds"); if (index < 0) { index += sizes[i]; } offset += index * strides[i]; } f(out_data, in_data, offset); }); } // The kernels are templated on an opaque, self-aligned type of the correct // size to avoid redundant kernels for different types of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; template <typename scalar_t> void index_fill_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, scalar_t fill_val) { if (0 == iter.numel()) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds"); if (idx < 0) { idx += self_dim_size; } self_data[idx * self_dim_stride] = fill_val; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_copy_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2)); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); auto* __restrict__ source_data = 
reinterpret_cast<scalar_t*>(source_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds"); self_data[idx * self_dim_stride] = *source_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)out_data = *(scalar_t*)(in_data + offset); }); } template <typename scalar_t> void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)(out_data + offset) = *(scalar_t*)in_data; }); } static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_kernel_impl<dtype>(iter, index_size, index_stride); }); } static void index_fill_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_fill_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; auto fill_val = source.to<scalar_t>(); auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val); index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque); }); } static void index_copy_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { // See note [Writing Nondeterministic Operations] // Nondeterministic when index contains duplicate entries // this kernel will not be 
called when torch.use_deterministic_algorithms(True) AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_copy_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride); }); } static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) { TORCH_CHECK(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_put_kernel_impl<dtype>(iter, index_size, index_stride); }); } static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) { NoNamesGuard guard; TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool, "masked_select: expected BoolTensor or ByteTensor for mask"); TORCH_CHECK(self.scalar_type() == result.scalar_type(), "masked_select(): self and result must have the same scalar type"); auto mask_temp = (mask.dim() == 0) ? c10::MaybeOwned<Tensor>::owned(mask.unsqueeze(0)) : c10::MaybeOwned<Tensor>::borrowed(mask); auto self_temp = (self.dim() == 0) ? c10::MaybeOwned<Tensor>::owned(self.unsqueeze(0)) : c10::MaybeOwned<Tensor>::borrowed(self); // Cannot reassign to mask_temp and self_temp here! if they are // owning and expand_outplace returns a borrow, the returned borrow // would dangle. 
auto mask_self_expanded = expand_outplace(*mask_temp, *self_temp); at::native::index_out(result, *std::get<1>(mask_self_expanded), c10::List<c10::optional<at::Tensor>>({*std::get<0>(std::move(mask_self_expanded))})); return result; } Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) { namedinference::compute_broadcast_outnames(self, mask); Tensor result = at::empty({0}, self.options()); return masked_select_out_cuda_impl(result, self, mask); } Tensor & masked_select_out_cuda(const Tensor & self, const Tensor & mask, Tensor & result) { namedinference::compute_broadcast_outnames(self, mask); return masked_select_out_cuda_impl(result, self, mask); } template <typename scalar_t, typename index_t, typename func_t> void cuda_take_put_kernel( TensorIterator& iter, const Tensor& indexed, const func_t& f) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f); } return; } const auto numel = indexed.numel(); const bool is_contiguous = indexed.is_contiguous(); char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2>(iter); using uindex_t = std::make_unsigned_t<index_t>; // OffsetCalculator needs the sizes and strides reveresed const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend()); const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend()); const auto* indexed_strides_data = indexed_strides.data(); const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(), indexed_sizes.data(), &indexed_strides_data); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]); const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); 
CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds"); index_t offset = static_cast<index_t>(idx); if (offset < 0) { offset += numel; } if (!is_contiguous) { offset = offset_indexed.get(offset)[0]; } f(iterated, offset); }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void put_kernel(TensorIterator& iter, const Tensor& output, const bool accumulate) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] { // Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd` AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long, "put_cuda_index", [&] { auto* __restrict__ indexed_ptr = output.template data<scalar_t>(); if (accumulate) { const auto numel = output.numel(); cuda_take_put_kernel<scalar_t, index_t>(iter, output, [numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated); }); } else { cuda_take_put_kernel<scalar_t, index_t>(iter, output, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { indexed_ptr[offset] = iterated; }); } }); }); } void take_kernel( TensorIterator& iter, const Tensor& input) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] { // Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? 
ScalarType::Int : ScalarType::Long, "take_cuda_index", [&] { const auto* __restrict__ indexed_ptr = input.template data<scalar_t>(); cuda_take_put_kernel<scalar_t, index_t>(iter, input, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { iterated = indexed_ptr[offset]; }); }); }); } namespace { __global__ void masked_scatter_size_check(int64_t *totalElements, int64_t srcSize) { CUDA_KERNEL_ASSERT(*totalElements <= srcSize); } template <typename mask_t> void masked_scatter_cuda_impl(Tensor& self, const Tensor& mask, const Tensor& source){ auto srcSize = source.numel(); if (self.numel() == 0) { return; } auto mask_cont = mask.contiguous(); // Use a prefix sum to determine the output locations of the masked elements auto maskPrefixSum = at::empty_like(mask_cont, mask.options().dtype(kLong)); at::cuda::cub::exclusive_scan( mask_cont.data_ptr<mask_t>(), maskPrefixSum.data_ptr<int64_t>(), []__device__(int64_t a, int64_t b) { return a + b; }, int64_t(0), mask_cont.numel()); // Determine our output size auto totalElements = (at::_unsafe_view(maskPrefixSum, -1)[-1] + at::_unsafe_view(mask_cont, -1)[-1]); // Asynchronously check that the number of `1` elements present in the mask // must be <= the number of elements available in `src`. 
hipLaunchKernelGGL(( masked_scatter_size_check), dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), totalElements.data_ptr<int64_t>(), srcSize); C10_HIP_KERNEL_LAUNCH_CHECK(); // We are getting elements from `src` based on an offset from // `maskPrefixSum`, so that should be made contiguous too auto source_contig = source.contiguous(); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self) .add_input(self) .add_input(mask_cont) .add_input(maskPrefixSum) .build(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "masked_scatter_", [&]() { auto source_ptr = source_contig.data_ptr<scalar_t>(); gpu_kernel( iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t { if (mask) { return source_ptr[maskPrefixSum]; } return a; }); hipGetLastError(); }); } } // anonymous namespace Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) { at::assert_no_internal_overlap(self); TORCH_CHECK( self.scalar_type() == source.scalar_type(), "masked_scatter: expected self and source to have same dtypes but got", self.scalar_type(), " and ", source.scalar_type()); c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_scatter_"); if (b_mask->dtype() == ScalarType::Byte) { TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated," \ "please use a mask with dtype torch.bool instead."); } auto mask_dtype = b_mask->scalar_type(); if (mask_dtype == ScalarType::Bool) { masked_scatter_cuda_impl<bool>(self, *b_mask, source); } else { masked_scatter_cuda_impl<uint8_t>(self, *b_mask, source); } return self; } template <typename scalar_t> void flip_kernel_impl(TensorIterator& iter) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { flip_kernel_impl<scalar_t>(sub_iter); } return; } 
char* const __restrict__ out_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); const char* const __restrict__ in_ptr = reinterpret_cast<const char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2, /*signed_strides=*/true>(iter); auto loop = [=]C10_DEVICE(const int i) { const auto offsets = offset_calc.get(i); // offsets can be negative here, but it's fine scalar_t* const __restrict__ out_data = reinterpret_cast<scalar_t*>(out_ptr + offsets[0]); const scalar_t* const __restrict__ in_data = reinterpret_cast<const scalar_t*>(in_ptr + offsets[1]); *out_data = *in_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void flip_kernel(TensorIterator& iter, const bool quantized) { if (quantized) { AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "flip_quantized_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; flip_kernel_impl<dtype>(iter); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "flip_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; flip_kernel_impl<dtype>(iter); }); } } REGISTER_DISPATCH(index_stub, &index_kernel); REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel); REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel); REGISTER_DISPATCH(index_put_stub, &index_put_kernel); REGISTER_DISPATCH(put_stub, &put_kernel); REGISTER_DISPATCH(take_stub, &take_kernel); REGISTER_DISPATCH(flip_stub, &flip_kernel); }} // namespace at::native
a00559fa2423b3fb5241d0a4b75fa32849bbb88b.cu
#include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/native/TensorTransformations.h> // flip #include <type_traits> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/core/Array.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/cub.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/ExpandUtils.h> #include <ATen/MemoryOverlap.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/KernelUtils.cuh> #include <c10/util/MaybeOwned.h> #include <THC/THCTensorInfo.cuh> namespace at { namespace native { static constexpr int launch_bound2 = 4; static constexpr int launch_size_nd = 128; template<int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, launch_bound2) __global__ void index_elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::cuda::getCurrentCUDAStream(); index_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename func_t> void gpu_index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, const func_t& f) { int num_indices = index_size.size(); AT_ASSERT(num_indices == index_stride.size()); AT_ASSERT(num_indices == iter.ntensors() - 2); if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { gpu_index_kernel(sub_iter, index_size, index_stride, f); } return; } auto sizes = at::detail::Array<int64_t, 
25>(0); auto strides = at::detail::Array<int64_t, 25>(0); auto index_ptrs = at::detail::Array<char*, 25>(nullptr); for (int i = 0; i < num_indices; i++) { sizes[i] = index_size[i]; strides[i] = index_stride[i]; index_ptrs[i] = (char*)iter.data_ptr(i + 2); } char* out_ptr = (char*)iter.data_ptr(0); char* in_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<3>(iter); launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), [=]__device__(int idx) { auto offsets = offset_calc.get(idx); char* out_data = out_ptr + offsets[0]; char* in_data = in_ptr + offsets[1]; int64_t offset = 0; #pragma unroll for (int i = 0; i < num_indices; i++) { int64_t index = *(int64_t*)(index_ptrs[i] + offsets[2]); CUDA_KERNEL_ASSERT(index >= -sizes[i] && index < sizes[i] && "index out of bounds"); if (index < 0) { index += sizes[i]; } offset += index * strides[i]; } f(out_data, in_data, offset); }); } // The kernels are templated on an opaque, self-aligned type of the correct // size to avoid redundant kernels for different types of the same size. 
template <int N> struct alignas(N) OpaqueType { char data[N]; }; template <typename scalar_t> void index_fill_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, scalar_t fill_val) { if (0 == iter.numel()) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_fill_kernel_impl(sub_iter, dim, self_dim_size, self_dim_stride, fill_val); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx >= -self_dim_size && idx < self_dim_size && "index out of bounds"); if (idx < 0) { idx += self_dim_size; } self_data[idx * self_dim_stride] = fill_val; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_copy_kernel_impl( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { index_copy_kernel_impl<scalar_t>(sub_iter, dim, self_dim_size, self_dim_stride); } return; } char* __restrict__ self_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); char* __restrict__ source_ptr = reinterpret_cast<char*>(iter.data_ptr(2)); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto* __restrict__ self_data = reinterpret_cast<scalar_t*>(self_ptr + offsets[0]); auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); auto* __restrict__ source_data = 
reinterpret_cast<scalar_t*>(source_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx >= 0 && idx < self_dim_size && "index_copy_(): index out of bounds"); self_data[idx * self_dim_stride] = *source_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } template <typename scalar_t> void index_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)out_data = *(scalar_t*)(in_data + offset); }); } template <typename scalar_t> void index_put_kernel_impl(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { gpu_index_kernel(iter, index_size, index_stride, []C10_DEVICE(char* out_data, char* in_data, int64_t offset) { *(scalar_t*)(out_data + offset) = *(scalar_t*)in_data; }); } static void index_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_kernel_impl<dtype>(iter, index_size, index_stride); }); } static void index_fill_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride, const Scalar& source) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_fill_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; auto fill_val = source.to<scalar_t>(); auto fill_val_opaque = *reinterpret_cast<dtype*>(&fill_val); index_fill_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride, fill_val_opaque); }); } static void index_copy_kernel( TensorIterator& iter, int64_t dim, int64_t self_dim_size, int64_t self_dim_stride) { // See note [Writing Nondeterministic Operations] // Nondeterministic when index contains duplicate entries // this kernel will not be 
called when torch.use_deterministic_algorithms(True) AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_copy_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_copy_kernel_impl<dtype>(iter, dim, self_dim_size, self_dim_stride); }); } static void index_put_kernel(TensorIterator& iter, IntArrayRef index_size, IntArrayRef index_stride, bool accumulate) { TORCH_CHECK(!accumulate, "index_put does not support accumulate=true"); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "index_put", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; index_put_kernel_impl<dtype>(iter, index_size, index_stride); }); } static Tensor & masked_select_out_cuda_impl(Tensor & result, const Tensor & self, const Tensor & mask) { NoNamesGuard guard; TORCH_CHECK(mask.scalar_type() == ScalarType::Byte || mask.scalar_type() == ScalarType::Bool, "masked_select: expected BoolTensor or ByteTensor for mask"); TORCH_CHECK(self.scalar_type() == result.scalar_type(), "masked_select(): self and result must have the same scalar type"); auto mask_temp = (mask.dim() == 0) ? c10::MaybeOwned<Tensor>::owned(mask.unsqueeze(0)) : c10::MaybeOwned<Tensor>::borrowed(mask); auto self_temp = (self.dim() == 0) ? c10::MaybeOwned<Tensor>::owned(self.unsqueeze(0)) : c10::MaybeOwned<Tensor>::borrowed(self); // Cannot reassign to mask_temp and self_temp here! if they are // owning and expand_outplace returns a borrow, the returned borrow // would dangle. 
auto mask_self_expanded = expand_outplace(*mask_temp, *self_temp); at::native::index_out(result, *std::get<1>(mask_self_expanded), c10::List<c10::optional<at::Tensor>>({*std::get<0>(std::move(mask_self_expanded))})); return result; } Tensor masked_select_cuda(const Tensor & self, const Tensor & mask) { namedinference::compute_broadcast_outnames(self, mask); Tensor result = at::empty({0}, self.options()); return masked_select_out_cuda_impl(result, self, mask); } Tensor & masked_select_out_cuda(const Tensor & self, const Tensor & mask, Tensor & result) { namedinference::compute_broadcast_outnames(self, mask); return masked_select_out_cuda_impl(result, self, mask); } template <typename scalar_t, typename index_t, typename func_t> void cuda_take_put_kernel( TensorIterator& iter, const Tensor& indexed, const func_t& f) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { cuda_take_put_kernel<scalar_t, index_t>(sub_iter, indexed, f); } return; } const auto numel = indexed.numel(); const bool is_contiguous = indexed.is_contiguous(); char* __restrict__ iterated_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); char* __restrict__ idx_ptr = reinterpret_cast<char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2>(iter); using uindex_t = std::make_unsigned_t<index_t>; // OffsetCalculator needs the sizes and strides reveresed const auto indexed_sizes = std::vector<int64_t>(indexed.sizes().rbegin(), indexed.sizes().rend()); const auto indexed_strides = std::vector<int64_t>(indexed.strides().rbegin(), indexed.strides().rend()); const auto* indexed_strides_data = indexed_strides.data(); const auto offset_indexed = OffsetCalculator<1, uindex_t>(indexed.dim(), indexed_sizes.data(), &indexed_strides_data); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); auto& iterated = *reinterpret_cast<scalar_t*>(iterated_ptr + offsets[0]); const auto idx = *reinterpret_cast<int64_t*>(idx_ptr + offsets[1]); 
CUDA_KERNEL_ASSERT(idx < numel && idx >= -numel && "cuda_take_put_kernel() index out of bounds"); index_t offset = static_cast<index_t>(idx); if (offset < 0) { offset += numel; } if (!is_contiguous) { offset = offset_indexed.get(offset)[0]; } f(iterated, offset); }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void put_kernel(TensorIterator& iter, const Tensor& output, const bool accumulate) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "put_cuda", [&] { // Cannot use `OpaqueType`, as we need the actual type for `fastSpecializedgpuAtomicAdd` AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(output) ? ScalarType::Int : ScalarType::Long, "put_cuda_index", [&] { auto* __restrict__ indexed_ptr = output.template data<scalar_t>(); if (accumulate) { const auto numel = output.numel(); cuda_take_put_kernel<scalar_t, index_t>(iter, output, [numel, indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { fastSpecializedAtomicAdd(indexed_ptr, offset, numel, iterated); }); } else { cuda_take_put_kernel<scalar_t, index_t>(iter, output, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { indexed_ptr[offset] = iterated; }); } }); }); } void take_kernel( TensorIterator& iter, const Tensor& input) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "take_cuda", [&] { // Cannot use `OpaqueType`, as Tensor::data_ptr<OpaqueType<N>> is not implemented AT_DISPATCH_INDEX_TYPES(cuda::detail::canUse32BitIndexMath(input) ? 
ScalarType::Int : ScalarType::Long, "take_cuda_index", [&] { const auto* __restrict__ indexed_ptr = input.template data<scalar_t>(); cuda_take_put_kernel<scalar_t, index_t>(iter, input, [indexed_ptr] __device__(scalar_t& iterated, const index_t offset) { iterated = indexed_ptr[offset]; }); }); }); } namespace { __global__ void masked_scatter_size_check(int64_t *totalElements, int64_t srcSize) { CUDA_KERNEL_ASSERT(*totalElements <= srcSize); } template <typename mask_t> void masked_scatter_cuda_impl(Tensor& self, const Tensor& mask, const Tensor& source){ auto srcSize = source.numel(); if (self.numel() == 0) { return; } auto mask_cont = mask.contiguous(); // Use a prefix sum to determine the output locations of the masked elements auto maskPrefixSum = at::empty_like(mask_cont, mask.options().dtype(kLong)); at::cuda::cub::exclusive_scan( mask_cont.data_ptr<mask_t>(), maskPrefixSum.data_ptr<int64_t>(), []__device__(int64_t a, int64_t b) { return a + b; }, int64_t(0), mask_cont.numel()); // Determine our output size auto totalElements = (at::_unsafe_view(maskPrefixSum, -1)[-1] + at::_unsafe_view(mask_cont, -1)[-1]); // Asynchronously check that the number of `1` elements present in the mask // must be <= the number of elements available in `src`. 
masked_scatter_size_check<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( totalElements.data_ptr<int64_t>(), srcSize); C10_CUDA_KERNEL_LAUNCH_CHECK(); // We are getting elements from `src` based on an offset from // `maskPrefixSum`, so that should be made contiguous too auto source_contig = source.contiguous(); auto iter = TensorIteratorConfig() .set_check_mem_overlap(false) .check_all_same_dtype(false) .resize_outputs(false) .add_output(self) .add_input(self) .add_input(mask_cont) .add_input(maskPrefixSum) .build(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( ScalarType::Bool, ScalarType::BFloat16, ScalarType::Half, self.scalar_type(), "masked_scatter_", [&]() { auto source_ptr = source_contig.data_ptr<scalar_t>(); gpu_kernel( iter, [=] GPU_LAMBDA(scalar_t a, mask_t mask, int64_t maskPrefixSum) -> scalar_t { if (mask) { return source_ptr[maskPrefixSum]; } return a; }); cudaGetLastError(); }); } } // anonymous namespace Tensor & masked_scatter__cuda(Tensor& self, const Tensor& mask, const Tensor& source) { at::assert_no_internal_overlap(self); TORCH_CHECK( self.scalar_type() == source.scalar_type(), "masked_scatter: expected self and source to have same dtypes but got", self.scalar_type(), " and ", source.scalar_type()); c10::MaybeOwned<Tensor> b_mask = expand_inplace(self, mask, "masked_scatter_"); if (b_mask->dtype() == ScalarType::Byte) { TORCH_WARN("masked_scatter_ received a mask with dtype torch.uint8, this behavior is now deprecated," \ "please use a mask with dtype torch.bool instead."); } auto mask_dtype = b_mask->scalar_type(); if (mask_dtype == ScalarType::Bool) { masked_scatter_cuda_impl<bool>(self, *b_mask, source); } else { masked_scatter_cuda_impl<uint8_t>(self, *b_mask, source); } return self; } template <typename scalar_t> void flip_kernel_impl(TensorIterator& iter) { if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { flip_kernel_impl<scalar_t>(sub_iter); } return; } char* const __restrict__ out_ptr = 
reinterpret_cast<char*>(iter.data_ptr(0)); const char* const __restrict__ in_ptr = reinterpret_cast<const char*>(iter.data_ptr(1)); const auto offset_calc = make_offset_calculator<2, /*signed_strides=*/true>(iter); auto loop = [=]C10_DEVICE(const int i) { const auto offsets = offset_calc.get(i); // offsets can be negative here, but it's fine scalar_t* const __restrict__ out_data = reinterpret_cast<scalar_t*>(out_ptr + offsets[0]); const scalar_t* const __restrict__ in_data = reinterpret_cast<const scalar_t*>(in_ptr + offsets[1]); *out_data = *in_data; }; launch_kernel<launch_size_nd, launch_bound2>(iter.numel(), loop); } void flip_kernel(TensorIterator& iter, const bool quantized) { if (quantized) { AT_DISPATCH_QINT_AND_SUB_BYTE_TYPES(iter.dtype(), "flip_quantized_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; flip_kernel_impl<dtype>(iter); }); } else { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), "flip_cuda", [&] { using dtype = OpaqueType<sizeof(scalar_t)>; flip_kernel_impl<dtype>(iter); }); } } REGISTER_DISPATCH(index_stub, &index_kernel); REGISTER_DISPATCH(index_fill_stub, &index_fill_kernel); REGISTER_DISPATCH(index_copy_stub, &index_copy_kernel); REGISTER_DISPATCH(index_put_stub, &index_put_kernel); REGISTER_DISPATCH(put_stub, &put_kernel); REGISTER_DISPATCH(take_stub, &take_kernel); REGISTER_DISPATCH(flip_stub, &flip_kernel); }} // namespace at::native
abfafb24bcec695c333083f17386f3aa35793c76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/gather_nd.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/utils/nd_index.hpp> #include <nbla/variable.hpp> namespace nbla { namespace gather_nd_cuda { template <typename T> __global__ void forward(const int y_size, T *y_data, const int x_size, const T *x_data, const int *x_shape, const int *x_stride, const int *idx_data, const int idx_rows, const int idx_cols) { NBLA_CUDA_KERNEL_LOOP(tid, y_size) { auto slice_length = y_size / idx_cols; auto index_column = tid / slice_length; auto x_offset = tid - index_column * slice_length; for (int m = 0; m < idx_rows; m++) { auto index = idx_data[m * idx_cols + index_column]; x_offset += (index < 0 ? x_shape[m] + index : index) * x_stride[m]; } // The idx_data comes from a Variable that may be different at any forward // call. Unlike the CPU code we do not want to check the error in device // code (that would imply raising a trap plus always synchronization and // costly recovery). Still we don't want to read from unaccessible memory. 
if (x_offset < x_size) { y_data[tid] = x_data[x_offset]; } } } template <typename T> __global__ void backward(const int y_size, const T *y_grad, const int x_size, T *x_grad, const int *x_shape, const int *x_stride, const int *idx_data, const int idx_rows, const int idx_cols) { NBLA_CUDA_KERNEL_LOOP(tid, y_size) { auto slice_length = y_size / idx_cols; auto index_column = tid / slice_length; auto x_offset = tid - index_column * slice_length; for (int m = 0; m < idx_rows; m++) { auto index = idx_data[m * idx_cols + index_column]; x_offset += (index < 0 ? x_shape[m] + index : index) * x_stride[m]; } if (x_offset < x_size) { atomic_add(&x_grad[x_offset], y_grad[tid]); } } } template <typename T> __global__ void accum_grad(const int size, const int *idx, const T *y_grad, T *x_grad) { NBLA_CUDA_KERNEL_LOOP(i, size) { atomic_add(x_grad + idx[i], y_grad[i]); } } } template <typename T> void GatherNdCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { GatherNd<T>::setup_impl(inputs, outputs); Shape_t src_meta_shape = {2 * inputs[0]->ndim()}; src_meta_.reshape(src_meta_shape, true); Context cpu_ctx{{"cpu:float"}, "CpuCachedArray", "0"}; auto ptr = src_meta_.cast_data_and_get_pointer<int>(cpu_ctx, true); for (auto s : inputs[0]->shape()) { *ptr++ = static_cast<int>(s); } for (auto s : inputs[0]->strides()) { *ptr++ = static_cast<int>(s); } } template <typename T> void GatherNdCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto idx = inputs[1]->get_data_pointer<int>(this->ctx_); auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto idx_rows = static_cast<int>(inputs[1]->shape().at(0)); auto idx_cols = static_cast<int>(ndi::inner_size(inputs[1]->shape(), 1)); auto src_shape_ptr = src_meta_.get_data_pointer<int>(this->ctx_); auto src_stride_ptr = src_shape_ptr + inputs[0]->ndim(); 
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(gather_nd_cuda::forward, outputs[0]->size(), dst, inputs[0]->size(), src, src_shape_ptr, src_stride_ptr, idx, idx_rows, idx_cols); } template <typename T> void GatherNdCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } cuda_set_device(this->device_); if (!accum[0]) { inputs[0]->grad()->zero(); } auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto idx = inputs[1]->get_data_pointer<int>(this->ctx_); auto idx_rows = static_cast<int>(inputs[1]->shape().at(0)); auto idx_cols = static_cast<int>(ndi::inner_size(inputs[1]->shape(), 1)); auto x_shape_ptr = src_meta_.get_data_pointer<int>(this->ctx_); auto x_stride_ptr = x_shape_ptr + inputs[0]->ndim(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(gather_nd_cuda::backward, outputs[0]->size(), g_y, inputs[0]->size(), g_x, x_shape_ptr, x_stride_ptr, idx, idx_rows, idx_cols); } }
abfafb24bcec695c333083f17386f3aa35793c76.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/gather_nd.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/utils/nd_index.hpp> #include <nbla/variable.hpp> namespace nbla { namespace gather_nd_cuda { template <typename T> __global__ void forward(const int y_size, T *y_data, const int x_size, const T *x_data, const int *x_shape, const int *x_stride, const int *idx_data, const int idx_rows, const int idx_cols) { NBLA_CUDA_KERNEL_LOOP(tid, y_size) { auto slice_length = y_size / idx_cols; auto index_column = tid / slice_length; auto x_offset = tid - index_column * slice_length; for (int m = 0; m < idx_rows; m++) { auto index = idx_data[m * idx_cols + index_column]; x_offset += (index < 0 ? x_shape[m] + index : index) * x_stride[m]; } // The idx_data comes from a Variable that may be different at any forward // call. Unlike the CPU code we do not want to check the error in device // code (that would imply raising a trap plus always synchronization and // costly recovery). Still we don't want to read from unaccessible memory. 
if (x_offset < x_size) { y_data[tid] = x_data[x_offset]; } } } template <typename T> __global__ void backward(const int y_size, const T *y_grad, const int x_size, T *x_grad, const int *x_shape, const int *x_stride, const int *idx_data, const int idx_rows, const int idx_cols) { NBLA_CUDA_KERNEL_LOOP(tid, y_size) { auto slice_length = y_size / idx_cols; auto index_column = tid / slice_length; auto x_offset = tid - index_column * slice_length; for (int m = 0; m < idx_rows; m++) { auto index = idx_data[m * idx_cols + index_column]; x_offset += (index < 0 ? x_shape[m] + index : index) * x_stride[m]; } if (x_offset < x_size) { atomic_add(&x_grad[x_offset], y_grad[tid]); } } } template <typename T> __global__ void accum_grad(const int size, const int *idx, const T *y_grad, T *x_grad) { NBLA_CUDA_KERNEL_LOOP(i, size) { atomic_add(x_grad + idx[i], y_grad[i]); } } } template <typename T> void GatherNdCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { GatherNd<T>::setup_impl(inputs, outputs); Shape_t src_meta_shape = {2 * inputs[0]->ndim()}; src_meta_.reshape(src_meta_shape, true); Context cpu_ctx{{"cpu:float"}, "CpuCachedArray", "0"}; auto ptr = src_meta_.cast_data_and_get_pointer<int>(cpu_ctx, true); for (auto s : inputs[0]->shape()) { *ptr++ = static_cast<int>(s); } for (auto s : inputs[0]->strides()) { *ptr++ = static_cast<int>(s); } } template <typename T> void GatherNdCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); auto src = inputs[0]->get_data_pointer<Tcu>(this->ctx_); auto idx = inputs[1]->get_data_pointer<int>(this->ctx_); auto dst = outputs[0]->cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto idx_rows = static_cast<int>(inputs[1]->shape().at(0)); auto idx_cols = static_cast<int>(ndi::inner_size(inputs[1]->shape(), 1)); auto src_shape_ptr = src_meta_.get_data_pointer<int>(this->ctx_); auto src_stride_ptr = src_shape_ptr + inputs[0]->ndim(); 
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(gather_nd_cuda::forward, outputs[0]->size(), dst, inputs[0]->size(), src, src_shape_ptr, src_stride_ptr, idx, idx_rows, idx_cols); } template <typename T> void GatherNdCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } cuda_set_device(this->device_); if (!accum[0]) { inputs[0]->grad()->zero(); } auto g_y = outputs[0]->get_grad_pointer<Tcu>(this->ctx_); auto g_x = inputs[0]->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto idx = inputs[1]->get_data_pointer<int>(this->ctx_); auto idx_rows = static_cast<int>(inputs[1]->shape().at(0)); auto idx_cols = static_cast<int>(ndi::inner_size(inputs[1]->shape(), 1)); auto x_shape_ptr = src_meta_.get_data_pointer<int>(this->ctx_); auto x_stride_ptr = x_shape_ptr + inputs[0]->ndim(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(gather_nd_cuda::backward, outputs[0]->size(), g_y, inputs[0]->size(), g_x, x_shape_ptr, x_stride_ptr, idx, idx_rows, idx_cols); } }
811fdfa339a78de47d51c2f805ce6dca285dc98f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_4_bot; int xdim0_update_halo_kernel2_zvel_plus_4_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_4_bot; int ydim0_update_halo_kernel2_zvel_plus_4_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_4_bot; int xdim1_update_halo_kernel2_zvel_plus_4_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_4_bot; int ydim1_update_halo_kernel2_zvel_plus_4_bot_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_4_bot * (y) + \ xdim0_update_halo_kernel2_zvel_plus_4_bot * \ ydim0_update_halo_kernel2_zvel_plus_4_bot * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_4_bot * (y) + \ xdim1_update_halo_kernel2_zvel_plus_4_bot * \ ydim1_update_halo_kernel2_zvel_plus_4_bot * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_4_bot(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, 4, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, 4, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_4_bot( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_bot + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_bot * ydim0_update_halo_kernel2_zvel_plus_4_bot; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_bot + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_bot * ydim1_update_halo_kernel2_zvel_plus_4_bot; if 
(idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_4_bot(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_4_bot(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 93)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(93, "update_halo_kernel2_zvel_plus_4_bot"); OPS_kernels[93].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_bot_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_bot_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_bot_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_bot_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_bot, &xdim0, sizeof(int)); 
xdim0_update_halo_kernel2_zvel_plus_4_bot_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_bot, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_4_bot_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_bot, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_4_bot_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_bot, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_4_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - 
args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[93].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_bot), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[93].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[93].mpi_time += t2 - t1; OPS_kernels[93].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[93].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
811fdfa339a78de47d51c2f805ce6dca285dc98f.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_4_bot; int xdim0_update_halo_kernel2_zvel_plus_4_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_4_bot; int ydim0_update_halo_kernel2_zvel_plus_4_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_4_bot; int xdim1_update_halo_kernel2_zvel_plus_4_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_4_bot; int ydim1_update_halo_kernel2_zvel_plus_4_bot_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_zvel_plus_4_bot * (y) + \ xdim0_update_halo_kernel2_zvel_plus_4_bot * \ ydim0_update_halo_kernel2_zvel_plus_4_bot * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_zvel_plus_4_bot * (y) + \ xdim1_update_halo_kernel2_zvel_plus_4_bot * \ ydim1_update_halo_kernel2_zvel_plus_4_bot * (z)) // user function __device__ inline void update_halo_kernel2_zvel_plus_4_bot(double *zvel0, double *zvel1, const int *fields) { if (fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0, 0, 0)] = zvel0[OPS_ACC0(0, 4, 0)]; if (fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0, 0, 0)] = zvel1[OPS_ACC1(0, 4, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_4_bot( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_bot + idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_plus_4_bot * ydim0_update_halo_kernel2_zvel_plus_4_bot; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_bot + idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_plus_4_bot * ydim1_update_halo_kernel2_zvel_plus_4_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { 
update_halo_kernel2_zvel_plus_4_bot(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_4_bot(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 93)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(93, "update_halo_kernel2_zvel_plus_4_bot"); OPS_kernels[93].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_4_bot_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_4_bot_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_4_bot_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_4_bot_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_plus_4_bot, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_zvel_plus_4_bot_h = xdim0; 
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_plus_4_bot, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_zvel_plus_4_bot_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_plus_4_bot, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_zvel_plus_4_bot_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_plus_4_bot, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_zvel_plus_4_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 
* args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[93].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_plus_4_bot<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[93].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[93].mpi_time += t2 - t1; OPS_kernels[93].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[93].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
7e68007c987dad6348b292f6a9c1a6a60963c0cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" In this attempt, we plan on using the cudasgemm approach to do the convolution. Problem hipblasSgemm does only float multiplication of the matrices and we are looking for double precision. How will it affect my accuracy Training of the CNN is done using Keras. After training for 10 epochs, the obtained accuracy on the training data set is 99.70 and on the test data set is 99.14. This model implements the following layes in order- 2DConvolution----Maxpooling----2D Convolution----Maxpooling----Fully_connected layer----Fully_connected layer. The image is a 2828 greyscale image. The specifications of the layers are as follows Layer_0 Convolution 32 33 kernels with no padding and 1 stride. Layer_1 Maxpooling 22 filters with with no padding and 1 stride. Layer_2 Convolution 64 33 kernels with no padding and 1 stride. Layer_3 Maxpooling 22 filters with with no padding and 1 stride. Layer_4 Flattening Layer_5 Fully connected dense layer with 1204 output units. Layer_6 Dropout (done during training only). Layer_7 Fully connected dense layer with 10 output units. #include cuda_runtime.h #include device_launch_parameters.h #include stdio.h #includestdlib.h #includemath.h #include thrusthost_vector.h #include thrustdevice_vector.h #include rocblas.h Kernel that does bias addition to hipblasSgemm output and Relu activation. __global__ void bias_and_relu_kernel(float gpu_in,double kernel_biases,int op_h, int op_w, int op_d, double gpu_out) { int row = blockDim.yblockIdx.y + threadIdx.y; int col = blockDim.xblockIdx.x + threadIdx.x; int deep = blockDim.z blockIdx.z + threadIdx.z; if (row = op_h col = op_w deep = op_d) return; float c=gpu_in[deepop_wop_h + row op_w + col]; Add the bias, each sheet along z represents the output from one kernel and hence all elements in that sheet (or deep) needs to be added with the corresponding bias element. 
c += kernel_biases[deep]; Float and double are added. Result is therefore double. Relu activation relu(a)=max(0,a). if (c 0.0) { c = 0.0; } gpu_out[deepop_wop_h + row op_w + col]= c; } Kernel that does maxpooling. __global__ void maxpool_kernel(int h, int w, int d, double gpu_in, int pool_height, int pool_width, int op_h, int op_w, int op_d, double gpu_out) { int row = blockDim.yblockIdx.y + threadIdx.y; int col = blockDim.xblockIdx.x + threadIdx.x; int deep = blockDim.z blockIdx.z + threadIdx.z; if (row = op_h col = op_w deep = op_d) return; double max; max = gpu_in[(deepwh) + (rowpool_height)w + (colpool_width)]; for (int row_pointer = 0; row_pointer pool_height; row_pointer++) { for (int column_pointer = 0; column_pointer pool_width; column_pointer++) { if (gpu_in[(deepwh) + (rowpool_height)w + (colpool_width) + (row_pointerw) + (column_pointer)] max) { max = gpu_in[(deepwh) + (rowpool_height)w + (colpool_width) + (row_pointerw) + (column_pointer)]; } } } gpu_out[deepop_wop_h + row op_w + col] = max; } __global__ void dense_kernel(int num_input, int num_output, double gpu_in, double weights, double biases, double gpu_out, int num_classes) { int tid = blockDim.xblockIdx.x + threadIdx.x; if (tid = num_output) return; double sum = 0.0l; for (int count = 0; count num_input; count++) { sum += gpu_in[count] weights[tidnum_input + count]; } sum += biases[tid]; Activation If the layer is the final layer, then don't do anything (we do softmax in the CPU), otherwise relu activation max(0,value) is taken. if ((num_output) != num_classes) { if (sum 0.0) { sum = 0.0l; } } gpu_out[tid] = sum; } __host__ double data_patch_preparation(double in, int h, int w, int d,int k_h,int k_w,int k_d) { Kernels' order is perfectly fine, they are already column ordered(one kernel after the other). Input data needs change. It is required that the patches that are used for convolution (element wise multiplication be grouped together). 
Thus, we need to prepare the data such that there is patch after patch, where each patch is the group of elements that are used in one particular convolution. int op_h = h - k_h + 1; int op_w = w - k_w + 1; int k = 0; patches will contain all the patches in row ordered fashion. There will definitely be a repeat of the elements from the original matrix because the patches overlap. double patches = (double )malloc((w - k_w + 1)(h - k_h + 1)(k_hk_wk_d) sizeof(double)); for (int r = 0; r op_h; r++) { for (int c = 0; c op_w; c++) { for (int sheet_pointer = 0; sheet_pointer k_d; sheet_pointer++) { for (int row_pointer = r; row_pointer r+k_h; row_pointer++) { for (int column_pointer = c; column_pointer c+k_w; column_pointer++) { patches[k]=in[sheet_pointerwh + row_pointer w + column_pointer]; k++; } } } } } Now from these row ordered patches, we convert them into column ordered fashion so that they can be passed into the hipblasSgemm function. Size of each patch is k_hk_wk_d. double co_patches = (double )malloc((w - k_w + 1)(h - k_h + 1)(k_hk_wk_d) sizeof(double)); Stands for column oredered patches int patch_size = k_h k_wk_d; int num_patches = op_h op_w; k = 0; for (int i = 0; i patch_size; i++) { for (int j = 0; j num_patches; j++) { co_patches[k] = patches[i + j patch_size]; k++; } } return co_patches; } int main() { -------------------------------Reading all the weights and biases and the original image---------------------- File pointers to all the weights and biases and the image. FILE pFileImg; FILE pFileW0; FILE pFileB0; FILE pFileW2; FILE pFileB2; FILE pFileDW5; FILE pFileDB5; FILE pFileDW7; FILE pFileDB7; Note The weights are pulled out after training the mnist digit recognition dataset on keras with handwritten digits 0-9. The images are greysvale and hence to start with they have only one channel. Weights are pulled out and inputted into the respective arrays. 
Pulling out image values double img_arr = (double )malloc(28 28 sizeof(double)); pFileImg = fopen(CUsersmeghaDownloadsFinal_GPU_weightsImage_RO.txt, r); if (pFileImg == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 784; i++) { fscanf(pFileImg, %lf, &img_arr[i]); } Pulling out kernel weights for first conv layer. double W0_arr = (double )malloc(288 sizeof(double)); pFileW0 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsW0_RO.txt, r); if (pFileW0 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 288; i++) { fscanf(pFileW0, %lf, &W0_arr[i]); } Pulling out kernel biases for first conv layer. double B0_arr = (double )malloc(32 sizeof(double)); pFileB0 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsB0.txt, r); if (pFileB0 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 32; i++) { fscanf(pFileB0, %lf, &B0_arr[i]); } Pulling out kernel weights for second conv layer. double W2_arr = (double )malloc(18432 sizeof(double)); pFileW2 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsW2_RO.txt, r); if (pFileW2 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 18432; i++) { fscanf(pFileW2, %lf, &W2_arr[i]); } Pulling out kernel biases for second conv layer. double B2_arr = (double )malloc(64 sizeof(double)); pFileB2 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsB2.txt, r); if (pFileB2 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 64; i++) { fscanf(pFileB2, %lf, &B2_arr[i]); } Pulling out weights for first fully connected layer. double DW5_arr = (double )malloc(1638400 sizeof(double)); pFileDW5 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsDW5_RO.txt, r); if (pFileDW5 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 1638400; i++) { fscanf(pFileDW5, %lf, &DW5_arr[i]); } Pulling out biases for first fully connected layer. 
double DB5_arr = (double )malloc(1024 sizeof(double)); pFileDB5 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsDB5.txt, r); if (pFileDB5 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 1024; i++) { fscanf(pFileDB5, %lf, &DB5_arr[i]); } Pulling out weights for second fully connected layer. double DW7_arr = (double )malloc(10240 sizeof(double)); pFileDW7 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsDW7_RO.txt, r); if (pFileDW7 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 10240; i++) { fscanf(pFileDW7, %lf, &DW7_arr[i]); } Pulling out biases for second fully connected layer. double DB7_arr = (double )malloc(10 sizeof(double)); pFileDB7 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsDB7.txt, r); if (pFileDB7 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 10; i++) { fscanf(pFileDB7, %lf, &DB7_arr[i]); } -------------------------------------Reading done------------------------------------------------ int number_of_classes = 10; hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); int max_threads_per_block = prop.maxThreadsPerBlock; --------------------------Layer_0Convolution-------------------------------------------------- Convolution is done using the hipblasSgemm function. Details on how the kernel weights and inputs are organised to perform this multipliocation is on the report. Convolution parameters defined (parameters are self explantory from their names). int input_image_height = 28; int input_image_width = 28; int input_image_depth = 1; int kernel_height = 3; int kernel_width = 3; int kernel_depth = 1; int no_of_kernels = 32; int output_image_height = input_image_height - kernel_height + 1; int output_image_width = input_image_width - kernel_width + 1; int output_image_depth = no_of_kernels; Block and grid dimensions definitions. Defined 3 D blocks with z_threads=no_of_kernels and x_threadsy_threadsz_threads=max_threads_per_block. 
So, if x_threads=y_threads, then x_threads=sqrt(max_threads_per_blockz_threads). Defined 2 D grids. int z_threads = no_of_kernels; int x_threads = sqrt(max_threads_per_block z_threads); int y_threads = x_threads; dim3 blockdim0(x_threads, y_threads, z_threads); dim3 griddim0((output_image_width x_threads)+1, (output_image_height y_threads)+1, 1); Arranging the input image and the kernel in proper order (column major) to send off to cudaSgemm for multiplication. double co_patches_0; co_patches_0=data_patch_preparation(img_arr, input_image_height, input_image_width, input_image_depth, kernel_height, kernel_width, kernel_depth); co_patches contain the pathes in column order. Note It is observed that hipblasSgemm supports only float multiplication, thus the double-precision values are explicitly converted to float. const float A_0 = (float)co_patches_0; const float B_0 = (float)W0_arr; float C_0 = (float )malloc(output_image_heightoutput_image_widthoutput_image_depthsizeof(float)); int m = output_image_height output_image_width;Number of rows in A. int n = no_of_kernels; Number of kernels. int k = kernel_height kernel_widthkernel_depth; Patch size. int lda = m, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float alpha = &alf; const float beta = &bet; Create a handle for CUBLAS hipblasHandle_t handle; hipblasCreate(&handle); hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A_0, lda, B_0, ldb, beta, C_0, ldc); hipblasDestroy(handle); The resulting column ordered matrix is the column ordered matrix, so it should have elements in the order (First_patch_1st kernel,second_patch_1st_kernel,third_patch_1st_kernel,.....last_patch_1st_kernel, and so on for all the patches and kernels). To get the corresponding value on the output,divide C into (num_kernels) parts of (num_patches) each. Copy co_patches into GPU. 
float gpu_C_0; hipMalloc((void )&gpu_C_0, output_image_heightoutput_image_widthkernel_heightkernel_widthkernel_depth sizeof(float)); hipMemcpy(gpu_C_0, C_0, output_image_heightoutput_image_widthkernel_heightkernel_widthkernel_depth sizeof(float), hipMemcpyHostToDevice); We can do the bias addition and relu activation by writing GPU kernels for the same. Copying kernel biases into GPU. double kernel_biases_0; hipMalloc((void )&kernel_biases_0, no_of_kernels sizeof(double)); hipMemcpy(kernel_biases_0, B0_arr, no_of_kernels sizeof(double), hipMemcpyHostToDevice); Creating output array inside GPU. double gpu_out_0; hipMalloc((void )&gpu_out_0, output_image_heightoutput_image_widthno_of_kernels sizeof(double)); bias_and_relu_kernel griddim0, blockdim0 (gpu_C_0, kernel_biases_0,output_image_height, output_image_width, output_image_depth, gpu_out_0); double layer_0 = (double )malloc(output_image_heightoutput_image_widthno_of_kernels sizeof(double)); hipMemcpy(layer_0, gpu_out_0, output_image_heightoutput_image_widthno_of_kernels sizeof(double), hipMemcpyDeviceToHost); layer_0 is the output from the first layer. Free all the unnecessary things from the GPU to make space for the next kernel. hipFree(gpu_C_0); hipFree(kernel_biases_0); hipFree(gpu_out_0); free(co_patches_0); --------------------------Layer 0 done------------------------------------------------------------ -------------------------------Layer 1 Maxpooling------------------------------------------------- Maxpooling layer kernel preparation. int pool_height = 3; int pool_width = 3; input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; z_threads = input_image_depth; x_threads = sqrt(max_threads_per_block z_threads); y_threads = x_threads; When faced with image dimensions not perfectly devisible by the pool dimension, Keras removes the excess indivisible rows and columns before pooling. Doing the same thing here. 
output_image_height = (input_image_height - input_image_height % pool_height) pool_height; output_image_width = (input_image_width - input_image_width % pool_width) pool_width; output_image_depth = input_image_depth; dim3 blockdim1(x_threads, y_threads, z_threads); dim3 griddim1((output_image_width x_threads)+1, (output_image_height y_threads)+1, 1); Copying the previous output into GPU. double gpu_in_1; hipMalloc((void )&gpu_in_1, input_image_heightinput_image_widthinput_image_depth sizeof(double)); hipMemcpy(gpu_in_1, layer_0, input_image_heightinput_image_widthinput_image_depth sizeof(double), hipMemcpyHostToDevice); Creating output array inside GPU. double gpu_out_1; hipMalloc((void )&gpu_out_1, output_image_heightoutput_image_widthoutput_image_depth sizeof(double)); maxpool_kernel griddim1, blockdim1 (input_image_height, input_image_width, input_image_depth, gpu_in_1, pool_height, pool_width, output_image_height, output_image_width, output_image_depth, gpu_out_1); double layer_1 = (double )malloc(output_image_heightoutput_image_widthoutput_image_depth sizeof(double)); hipMemcpy(layer_1, gpu_out_1, output_image_heightoutput_image_widthoutput_image_depth sizeof(double), hipMemcpyDeviceToHost); layer 1 is the output. hipFree(gpu_in_1); hipFree(gpu_out_1); ---------------------------------------Layer 1 done----------------------------------------------- --------------------------------------Layer 2 Convolution---------------------------------------- Convolution layer preparation. input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; kernel_height = 3; kernel_width = 3; kernel_depth = 32; no_of_kernels = 64; output_image_height = input_image_height - kernel_height + 1; output_image_width = input_image_width - kernel_width + 1; output_image_depth = no_of_kernels; Defined 3 D blocks with z_threads=no_of_kernels and x_threadsy_threadsz_threads=max_threads_per_block. 
So, if x_threads=y_threads, then x_threads=sqrt(max_threads_per_blockz_threads). Defined 2 D grids. z_threads = no_of_kernels; x_threads = sqrt(max_threads_per_block z_threads); y_threads = x_threads; dim3 blockdim2(x_threads, y_threads, z_threads); dim3 griddim2((output_image_width x_threads) + 1, (output_image_height y_threads) + 1, 1); Arranging the input image and the kernel in proper order (column major) to send off to cudaSgemm for multiplication. double co_patches_2; co_patches_2 = data_patch_preparation(layer_1, input_image_height, input_image_width, input_image_depth, kernel_height, kernel_width, kernel_depth); co_patches contain the pathes in column order. Note It is observed that hipblasSgemm supports only float multiplication, thus the double-precision values are explicitly converted to float. const float A_2 = (float)co_patches_2; const float B_2 = (float)W2_arr; float C_2 = (float )malloc(output_image_heightoutput_image_widthoutput_image_depth sizeof(float)); m = output_image_height output_image_width;Number of rows in A. n = no_of_kernels; Number of kernels. k = kernel_height kernel_widthkernel_depth; Patch size. lda = m, ldb = k, ldc = m; Create a handle for CUBLAS hipblasHandle_t handle2; hipblasCreate(&handle2); hipblasSgemm(handle2, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A_2, lda, B_2, ldb, beta, C_2, ldc); hipblasDestroy(handle2); The resulting column ordered matrix is the column ordered matrix, so it should have elements in the order (First_patch_1st kernel,second_patch_1st_kernel,third_patch_1st_kernel,.....last_patch_1st_kernel, and so on for all the patches and kernels). To get the corresponding value on the output,divide C into (num_kernels) parts of (num_patches) each. Getting co_patches into GPU. 
float gpu_C_2; hipMalloc((void )&gpu_C_2, output_image_heightoutput_image_widthkernel_heightkernel_widthkernel_depth sizeof(float)); hipMemcpy(gpu_C_2, C_2, output_image_heightoutput_image_widthkernel_heightkernel_widthkernel_depth sizeof(float), hipMemcpyHostToDevice); We can do the bias addition and relu activation by writing GPU kernels for the same. Copying kernel biases into GPU. double kernel_biases_2; hipMalloc((void )&kernel_biases_2, no_of_kernels sizeof(double)); hipMemcpy(kernel_biases_2, B2_arr, no_of_kernels sizeof(double), hipMemcpyHostToDevice); Creating output array inside GPU. double gpu_out_2; hipMalloc((void )&gpu_out_2, output_image_heightoutput_image_widthno_of_kernels sizeof(double)); bias_and_relu_kernel griddim2, blockdim2 (gpu_C_2, kernel_biases_2, output_image_height, output_image_width, output_image_depth, gpu_out_2); double layer_2 = (double )malloc(output_image_heightoutput_image_widthno_of_kernels sizeof(double)); hipMemcpy(layer_2, gpu_out_2, output_image_heightoutput_image_widthno_of_kernels sizeof(double), hipMemcpyDeviceToHost); layer_2 is the output from the second layer. Free all the unnecessary things from the GPU to make space for the next kernel. hipFree(gpu_C_2); hipFree(kernel_biases_2); hipFree(gpu_out_2); free(co_patches_2); ---------------------------------Layer 2 done-------------------------------------------------------- ----------------------------------Layer 3 Maxpooling------------------------------------------------------ Maxpooling layer. 
pool_height = 3; pool_width = 3; input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; z_threads = input_image_depth; x_threads = sqrt(max_threads_per_block z_threads); y_threads = x_threads; output_image_height = (input_image_height - input_image_height % pool_height) pool_height; output_image_width = (input_image_width - input_image_width % pool_width) pool_width; output_image_depth = input_image_depth; dim3 blockdim3(x_threads, y_threads, z_threads); dim3 griddim3(output_image_width x_threads, output_image_height y_threads, 1); Copying the previous output into GPU. double gpu_in_3; hipMalloc((void )&gpu_in_3, input_image_heightinput_image_widthinput_image_depth sizeof(double)); hipMemcpy(gpu_in_3, layer_2, input_image_heightinput_image_widthinput_image_depth sizeof(double), hipMemcpyHostToDevice); Creating output array inside GPU. double gpu_out_3; hipMalloc((void )&gpu_out_3, output_image_heightoutput_image_widthoutput_image_depth sizeof(double)); maxpool_kernel griddim3, blockdim3 (input_image_height, input_image_width, input_image_depth, gpu_in_3, pool_height, pool_width, output_image_height, output_image_width, output_image_depth, gpu_out_3); double layer_3 = (double )malloc(output_image_heightoutput_image_widthoutput_image_depth sizeof(double)); hipMemcpy(layer_3, gpu_out_3, output_image_heightoutput_image_widthoutput_image_depth sizeof(double), hipMemcpyDeviceToHost); layer 1 is the output. hipFree(gpu_in_3); hipFree(gpu_out_3); ---------------------------------------Layer 3 done-------------------------------------------------- --------------------------------------Layer 4 Flattening---------------------------------------- Flattening in the CPU itself. The idea is to apply the same kind of C major flattening that keras does to the elements coming in from the second pooling layer. 
The array coming in consists of rows of each sheet arranged side by side followed by the rows of the next sheet and so on. Jumbling up that order to stick with keras type flattening which is the C-major ordering consisting of z-axis changing fastest, followed by column and then row changing. int in_h = output_image_height; int in_w = output_image_width; int in_d = output_image_depth; int image_pointer; int channel_pointer; k = 0; double flattened = (double )malloc(in_hin_win_d sizeof(double)); for (image_pointer = 0; image_pointer in_hin_w; image_pointer++) { for (channel_pointer = 0; channel_pointer in_d; channel_pointer++) { flattened[k] = layer_3[image_pointer + channel_pointer in_hin_w]; k++; } } ----------------------------------------Layer 4 done----------------------------------------------- ----------------------------------------Layer 5 Fully connecteddense layer-------------------------- int input_layer_nodes = output_image_height output_image_widthoutput_image_depth; int output_layer_nodes = 1024; This layer has 1024 output nodes. double gpu_in_5; hipMalloc((void )&gpu_in_5, input_layer_nodes sizeof(double)); hipMemcpy(gpu_in_5, flattened, input_layer_nodes sizeof(double), hipMemcpyHostToDevice); double FC_weights_5; hipMalloc((void )&FC_weights_5, input_layer_nodes output_layer_nodes sizeof(double)); hipMemcpy(FC_weights_5, DW5_arr, input_layer_nodes output_layer_nodes sizeof(double), hipMemcpyHostToDevice); double FC_biases_5; hipMalloc((void )&FC_biases_5, output_layer_nodes sizeof(double)); hipMemcpy(FC_biases_5, DB5_arr, output_layer_nodes sizeof(double), hipMemcpyHostToDevice); double gpu_out_5; hipMalloc((void )&gpu_out_5, output_layer_nodes sizeof(double)); dim3 blocksize5(max_threads_per_block, 1, 1); dim3 gridsize5(output_layer_nodes max_threads_per_block, 1, 1); dense_kernel gridsize5, blocksize5 (input_layer_nodes, output_layer_nodes, gpu_in_5, FC_weights_5, FC_biases_5, gpu_out_5, number_of_classes); layer5 is the output. 
double layer_5 = (double )malloc(output_layer_nodes sizeof(double)); hipMemcpy(layer_5, gpu_out_5, output_layer_nodes sizeof(double), hipMemcpyDeviceToHost); hipFree(gpu_in_5); hipFree(gpu_out_5); hipFree(FC_biases_5); hipFree(FC_weights_5); ---------------------------------------Layer 5 done-------------------------------------------- ------------------------------------Layer 6 Fully connecteddense layer-------------------------- input_layer_nodes = output_layer_nodes; output_layer_nodes = number_of_classes; double gpu_in_6; hipMalloc((void )&gpu_in_6, input_layer_nodes sizeof(double)); hipMemcpy(gpu_in_6, layer_5, input_layer_nodes sizeof(double), hipMemcpyHostToDevice); double FC_weights_6; hipMalloc((void )&FC_weights_6, input_layer_nodes output_layer_nodes sizeof(double)); hipMemcpy(FC_weights_6, DW7_arr, input_layer_nodes output_layer_nodes sizeof(double), hipMemcpyHostToDevice); double FC_biases_6; hipMalloc((void )&FC_biases_6, output_layer_nodes sizeof(double)); hipMemcpy(FC_biases_6, DB7_arr, output_layer_nodes sizeof(double), hipMemcpyHostToDevice); double gpu_out_6; hipMalloc((void )&gpu_out_6, output_layer_nodes sizeof(double)); dim3 blocksize6(max_threads_per_block, 1, 1); dim3 gridsize6(output_layer_nodes max_threads_per_block, 1, 1); dense_kernel gridsize6, blocksize6 (input_layer_nodes, output_layer_nodes, gpu_in_6, FC_weights_6, FC_biases_6, gpu_out_6,number_of_classes); double layer_7 = (double )malloc(output_layer_nodes sizeof(double)); hipMemcpy(layer_7, gpu_out_6, output_layer_nodes sizeof(double), hipMemcpyDeviceToHost); layer7 is the output. hipFree(gpu_in_6); hipFree(gpu_out_6); hipFree(FC_biases_6); hipFree(FC_weights_6); -----------------------------------Layer 7 done-------------------------------------------------------- Softmax of the output layer. 
int op_layer_size = number_of_classes; int i; double sum = 0.0; for (i = 0; i op_layer_size; i++) { sum += exp(layer_7[i]); } printf(n%fn, exp(3)); double max = layer_7[0] sum; int max_no = 0; for (i = 0; i op_layer_size; i++) { printf(%lfn, layer_7[i]); if ((layer_7[i] sum) max) { max_no = i; } } }
7e68007c987dad6348b292f6a9c1a6a60963c0cb.cu
In this attempt, we plan on using the cudasgemm approach to do the convolution. Problem cublasSgemm does only float multiplication of the matrices and we are looking for double precision. How will it affect my accuracy Training of the CNN is done using Keras. After training for 10 epochs, the obtained accuracy on the training data set is 99.70 and on the test data set is 99.14. This model implements the following layes in order- 2DConvolution----Maxpooling----2D Convolution----Maxpooling----Fully_connected layer----Fully_connected layer. The image is a 2828 greyscale image. The specifications of the layers are as follows Layer_0 Convolution 32 33 kernels with no padding and 1 stride. Layer_1 Maxpooling 22 filters with with no padding and 1 stride. Layer_2 Convolution 64 33 kernels with no padding and 1 stride. Layer_3 Maxpooling 22 filters with with no padding and 1 stride. Layer_4 Flattening Layer_5 Fully connected dense layer with 1204 output units. Layer_6 Dropout (done during training only). Layer_7 Fully connected dense layer with 10 output units. #include cuda_runtime.h #include device_launch_parameters.h #include stdio.h #includestdlib.h #includemath.h #include thrusthost_vector.h #include thrustdevice_vector.h #include cublas_v2.h Kernel that does bias addition to cublasSgemm output and Relu activation. __global__ void bias_and_relu_kernel(float gpu_in,double kernel_biases,int op_h, int op_w, int op_d, double gpu_out) { int row = blockDim.yblockIdx.y + threadIdx.y; int col = blockDim.xblockIdx.x + threadIdx.x; int deep = blockDim.z blockIdx.z + threadIdx.z; if (row = op_h col = op_w deep = op_d) return; float c=gpu_in[deepop_wop_h + row op_w + col]; Add the bias, each sheet along z represents the output from one kernel and hence all elements in that sheet (or deep) needs to be added with the corresponding bias element. c += kernel_biases[deep]; Float and double are added. Result is therefore double. Relu activation relu(a)=max(0,a). 
if (c 0.0) { c = 0.0; } gpu_out[deepop_wop_h + row op_w + col]= c; } Kernel that does maxpooling. __global__ void maxpool_kernel(int h, int w, int d, double gpu_in, int pool_height, int pool_width, int op_h, int op_w, int op_d, double gpu_out) { int row = blockDim.yblockIdx.y + threadIdx.y; int col = blockDim.xblockIdx.x + threadIdx.x; int deep = blockDim.z blockIdx.z + threadIdx.z; if (row = op_h col = op_w deep = op_d) return; double max; max = gpu_in[(deepwh) + (rowpool_height)w + (colpool_width)]; for (int row_pointer = 0; row_pointer pool_height; row_pointer++) { for (int column_pointer = 0; column_pointer pool_width; column_pointer++) { if (gpu_in[(deepwh) + (rowpool_height)w + (colpool_width) + (row_pointerw) + (column_pointer)] max) { max = gpu_in[(deepwh) + (rowpool_height)w + (colpool_width) + (row_pointerw) + (column_pointer)]; } } } gpu_out[deepop_wop_h + row op_w + col] = max; } __global__ void dense_kernel(int num_input, int num_output, double gpu_in, double weights, double biases, double gpu_out, int num_classes) { int tid = blockDim.xblockIdx.x + threadIdx.x; if (tid = num_output) return; double sum = 0.0l; for (int count = 0; count num_input; count++) { sum += gpu_in[count] weights[tidnum_input + count]; } sum += biases[tid]; Activation If the layer is the final layer, then don't do anything (we do softmax in the CPU), otherwise relu activation max(0,value) is taken. if ((num_output) != num_classes) { if (sum 0.0) { sum = 0.0l; } } gpu_out[tid] = sum; } __host__ double data_patch_preparation(double in, int h, int w, int d,int k_h,int k_w,int k_d) { Kernels' order is perfectly fine, they are already column ordered(one kernel after the other). Input data needs change. It is required that the patches that are used for convolution (element wise multiplication be grouped together). Thus, we need to prepare the data such that there is patch after patch, where each patch is the group of elements that are used in one particular convolution. 
int op_h = h - k_h + 1; int op_w = w - k_w + 1; int k = 0; patches will contain all the patches in row ordered fashion. There will definitely be a repeat of the elements from the original matrix because the patches overlap. double patches = (double )malloc((w - k_w + 1)(h - k_h + 1)(k_hk_wk_d) sizeof(double)); for (int r = 0; r op_h; r++) { for (int c = 0; c op_w; c++) { for (int sheet_pointer = 0; sheet_pointer k_d; sheet_pointer++) { for (int row_pointer = r; row_pointer r+k_h; row_pointer++) { for (int column_pointer = c; column_pointer c+k_w; column_pointer++) { patches[k]=in[sheet_pointerwh + row_pointer w + column_pointer]; k++; } } } } } Now from these row ordered patches, we convert them into column ordered fashion so that they can be passed into the cublasSgemm function. Size of each patch is k_hk_wk_d. double co_patches = (double )malloc((w - k_w + 1)(h - k_h + 1)(k_hk_wk_d) sizeof(double)); Stands for column oredered patches int patch_size = k_h k_wk_d; int num_patches = op_h op_w; k = 0; for (int i = 0; i patch_size; i++) { for (int j = 0; j num_patches; j++) { co_patches[k] = patches[i + j patch_size]; k++; } } return co_patches; } int main() { -------------------------------Reading all the weights and biases and the original image---------------------- File pointers to all the weights and biases and the image. FILE pFileImg; FILE pFileW0; FILE pFileB0; FILE pFileW2; FILE pFileB2; FILE pFileDW5; FILE pFileDB5; FILE pFileDW7; FILE pFileDB7; Note The weights are pulled out after training the mnist digit recognition dataset on keras with handwritten digits 0-9. The images are greysvale and hence to start with they have only one channel. Weights are pulled out and inputted into the respective arrays. 
Pulling out image values double img_arr = (double )malloc(28 28 sizeof(double)); pFileImg = fopen(CUsersmeghaDownloadsFinal_GPU_weightsImage_RO.txt, r); if (pFileImg == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 784; i++) { fscanf(pFileImg, %lf, &img_arr[i]); } Pulling out kernel weights for first conv layer. double W0_arr = (double )malloc(288 sizeof(double)); pFileW0 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsW0_RO.txt, r); if (pFileW0 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 288; i++) { fscanf(pFileW0, %lf, &W0_arr[i]); } Pulling out kernel biases for first conv layer. double B0_arr = (double )malloc(32 sizeof(double)); pFileB0 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsB0.txt, r); if (pFileB0 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 32; i++) { fscanf(pFileB0, %lf, &B0_arr[i]); } Pulling out kernel weights for second conv layer. double W2_arr = (double )malloc(18432 sizeof(double)); pFileW2 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsW2_RO.txt, r); if (pFileW2 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 18432; i++) { fscanf(pFileW2, %lf, &W2_arr[i]); } Pulling out kernel biases for second conv layer. double B2_arr = (double )malloc(64 sizeof(double)); pFileB2 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsB2.txt, r); if (pFileB2 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 64; i++) { fscanf(pFileB2, %lf, &B2_arr[i]); } Pulling out weights for first fully connected layer. double DW5_arr = (double )malloc(1638400 sizeof(double)); pFileDW5 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsDW5_RO.txt, r); if (pFileDW5 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 1638400; i++) { fscanf(pFileDW5, %lf, &DW5_arr[i]); } Pulling out biases for first fully connected layer. 
double DB5_arr = (double )malloc(1024 sizeof(double)); pFileDB5 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsDB5.txt, r); if (pFileDB5 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 1024; i++) { fscanf(pFileDB5, %lf, &DB5_arr[i]); } Pulling out weights for second fully connected layer. double DW7_arr = (double )malloc(10240 sizeof(double)); pFileDW7 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsDW7_RO.txt, r); if (pFileDW7 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 10240; i++) { fscanf(pFileDW7, %lf, &DW7_arr[i]); } Pulling out biases for second fully connected layer. double DB7_arr = (double )malloc(10 sizeof(double)); pFileDB7 = fopen(CUsersmeghaDownloadsFinal_GPU_weightsDB7.txt, r); if (pFileDB7 == NULL) { fputs(File error, stderr); exit(1); } for (int i = 0; i 10; i++) { fscanf(pFileDB7, %lf, &DB7_arr[i]); } -------------------------------------Reading done------------------------------------------------ int number_of_classes = 10; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); int max_threads_per_block = prop.maxThreadsPerBlock; --------------------------Layer_0Convolution-------------------------------------------------- Convolution is done using the cublasSgemm function. Details on how the kernel weights and inputs are organised to perform this multipliocation is on the report. Convolution parameters defined (parameters are self explantory from their names). int input_image_height = 28; int input_image_width = 28; int input_image_depth = 1; int kernel_height = 3; int kernel_width = 3; int kernel_depth = 1; int no_of_kernels = 32; int output_image_height = input_image_height - kernel_height + 1; int output_image_width = input_image_width - kernel_width + 1; int output_image_depth = no_of_kernels; Block and grid dimensions definitions. Defined 3 D blocks with z_threads=no_of_kernels and x_threadsy_threadsz_threads=max_threads_per_block. 
So, if x_threads=y_threads, then x_threads=sqrt(max_threads_per_blockz_threads). Defined 2 D grids. int z_threads = no_of_kernels; int x_threads = sqrt(max_threads_per_block z_threads); int y_threads = x_threads; dim3 blockdim0(x_threads, y_threads, z_threads); dim3 griddim0((output_image_width x_threads)+1, (output_image_height y_threads)+1, 1); Arranging the input image and the kernel in proper order (column major) to send off to cudaSgemm for multiplication. double co_patches_0; co_patches_0=data_patch_preparation(img_arr, input_image_height, input_image_width, input_image_depth, kernel_height, kernel_width, kernel_depth); co_patches contain the pathes in column order. Note It is observed that cublasSgemm supports only float multiplication, thus the double-precision values are explicitly converted to float. const float A_0 = (float)co_patches_0; const float B_0 = (float)W0_arr; float C_0 = (float )malloc(output_image_heightoutput_image_widthoutput_image_depthsizeof(float)); int m = output_image_height output_image_width;Number of rows in A. int n = no_of_kernels; Number of kernels. int k = kernel_height kernel_widthkernel_depth; Patch size. int lda = m, ldb = k, ldc = m; const float alf = 1; const float bet = 0; const float alpha = &alf; const float beta = &bet; Create a handle for CUBLAS cublasHandle_t handle; cublasCreate(&handle); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A_0, lda, B_0, ldb, beta, C_0, ldc); cublasDestroy(handle); The resulting column ordered matrix is the column ordered matrix, so it should have elements in the order (First_patch_1st kernel,second_patch_1st_kernel,third_patch_1st_kernel,.....last_patch_1st_kernel, and so on for all the patches and kernels). To get the corresponding value on the output,divide C into (num_kernels) parts of (num_patches) each. Copy co_patches into GPU. 
float gpu_C_0; cudaMalloc((void )&gpu_C_0, output_image_heightoutput_image_widthkernel_heightkernel_widthkernel_depth sizeof(float)); cudaMemcpy(gpu_C_0, C_0, output_image_heightoutput_image_widthkernel_heightkernel_widthkernel_depth sizeof(float), cudaMemcpyHostToDevice); We can do the bias addition and relu activation by writing GPU kernels for the same. Copying kernel biases into GPU. double kernel_biases_0; cudaMalloc((void )&kernel_biases_0, no_of_kernels sizeof(double)); cudaMemcpy(kernel_biases_0, B0_arr, no_of_kernels sizeof(double), cudaMemcpyHostToDevice); Creating output array inside GPU. double gpu_out_0; cudaMalloc((void )&gpu_out_0, output_image_heightoutput_image_widthno_of_kernels sizeof(double)); bias_and_relu_kernel griddim0, blockdim0 (gpu_C_0, kernel_biases_0,output_image_height, output_image_width, output_image_depth, gpu_out_0); double layer_0 = (double )malloc(output_image_heightoutput_image_widthno_of_kernels sizeof(double)); cudaMemcpy(layer_0, gpu_out_0, output_image_heightoutput_image_widthno_of_kernels sizeof(double), cudaMemcpyDeviceToHost); layer_0 is the output from the first layer. Free all the unnecessary things from the GPU to make space for the next kernel. cudaFree(gpu_C_0); cudaFree(kernel_biases_0); cudaFree(gpu_out_0); free(co_patches_0); --------------------------Layer 0 done------------------------------------------------------------ -------------------------------Layer 1 Maxpooling------------------------------------------------- Maxpooling layer kernel preparation. int pool_height = 3; int pool_width = 3; input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; z_threads = input_image_depth; x_threads = sqrt(max_threads_per_block z_threads); y_threads = x_threads; When faced with image dimensions not perfectly devisible by the pool dimension, Keras removes the excess indivisible rows and columns before pooling. Doing the same thing here. 
output_image_height = (input_image_height - input_image_height % pool_height) pool_height; output_image_width = (input_image_width - input_image_width % pool_width) pool_width; output_image_depth = input_image_depth; dim3 blockdim1(x_threads, y_threads, z_threads); dim3 griddim1((output_image_width x_threads)+1, (output_image_height y_threads)+1, 1); Copying the previous output into GPU. double gpu_in_1; cudaMalloc((void )&gpu_in_1, input_image_heightinput_image_widthinput_image_depth sizeof(double)); cudaMemcpy(gpu_in_1, layer_0, input_image_heightinput_image_widthinput_image_depth sizeof(double), cudaMemcpyHostToDevice); Creating output array inside GPU. double gpu_out_1; cudaMalloc((void )&gpu_out_1, output_image_heightoutput_image_widthoutput_image_depth sizeof(double)); maxpool_kernel griddim1, blockdim1 (input_image_height, input_image_width, input_image_depth, gpu_in_1, pool_height, pool_width, output_image_height, output_image_width, output_image_depth, gpu_out_1); double layer_1 = (double )malloc(output_image_heightoutput_image_widthoutput_image_depth sizeof(double)); cudaMemcpy(layer_1, gpu_out_1, output_image_heightoutput_image_widthoutput_image_depth sizeof(double), cudaMemcpyDeviceToHost); layer 1 is the output. cudaFree(gpu_in_1); cudaFree(gpu_out_1); ---------------------------------------Layer 1 done----------------------------------------------- --------------------------------------Layer 2 Convolution---------------------------------------- Convolution layer preparation. input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; kernel_height = 3; kernel_width = 3; kernel_depth = 32; no_of_kernels = 64; output_image_height = input_image_height - kernel_height + 1; output_image_width = input_image_width - kernel_width + 1; output_image_depth = no_of_kernels; Defined 3 D blocks with z_threads=no_of_kernels and x_threadsy_threadsz_threads=max_threads_per_block. 
So, if x_threads=y_threads, then x_threads=sqrt(max_threads_per_blockz_threads). Defined 2 D grids. z_threads = no_of_kernels; x_threads = sqrt(max_threads_per_block z_threads); y_threads = x_threads; dim3 blockdim2(x_threads, y_threads, z_threads); dim3 griddim2((output_image_width x_threads) + 1, (output_image_height y_threads) + 1, 1); Arranging the input image and the kernel in proper order (column major) to send off to cudaSgemm for multiplication. double co_patches_2; co_patches_2 = data_patch_preparation(layer_1, input_image_height, input_image_width, input_image_depth, kernel_height, kernel_width, kernel_depth); co_patches contain the pathes in column order. Note It is observed that cublasSgemm supports only float multiplication, thus the double-precision values are explicitly converted to float. const float A_2 = (float)co_patches_2; const float B_2 = (float)W2_arr; float C_2 = (float )malloc(output_image_heightoutput_image_widthoutput_image_depth sizeof(float)); m = output_image_height output_image_width;Number of rows in A. n = no_of_kernels; Number of kernels. k = kernel_height kernel_widthkernel_depth; Patch size. lda = m, ldb = k, ldc = m; Create a handle for CUBLAS cublasHandle_t handle2; cublasCreate(&handle2); cublasSgemm(handle2, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A_2, lda, B_2, ldb, beta, C_2, ldc); cublasDestroy(handle2); The resulting column ordered matrix is the column ordered matrix, so it should have elements in the order (First_patch_1st kernel,second_patch_1st_kernel,third_patch_1st_kernel,.....last_patch_1st_kernel, and so on for all the patches and kernels). To get the corresponding value on the output,divide C into (num_kernels) parts of (num_patches) each. Getting co_patches into GPU. 
float gpu_C_2; cudaMalloc((void )&gpu_C_2, output_image_heightoutput_image_widthkernel_heightkernel_widthkernel_depth sizeof(float)); cudaMemcpy(gpu_C_2, C_2, output_image_heightoutput_image_widthkernel_heightkernel_widthkernel_depth sizeof(float), cudaMemcpyHostToDevice); We can do the bias addition and relu activation by writing GPU kernels for the same. Copying kernel biases into GPU. double kernel_biases_2; cudaMalloc((void )&kernel_biases_2, no_of_kernels sizeof(double)); cudaMemcpy(kernel_biases_2, B2_arr, no_of_kernels sizeof(double), cudaMemcpyHostToDevice); Creating output array inside GPU. double gpu_out_2; cudaMalloc((void )&gpu_out_2, output_image_heightoutput_image_widthno_of_kernels sizeof(double)); bias_and_relu_kernel griddim2, blockdim2 (gpu_C_2, kernel_biases_2, output_image_height, output_image_width, output_image_depth, gpu_out_2); double layer_2 = (double )malloc(output_image_heightoutput_image_widthno_of_kernels sizeof(double)); cudaMemcpy(layer_2, gpu_out_2, output_image_heightoutput_image_widthno_of_kernels sizeof(double), cudaMemcpyDeviceToHost); layer_2 is the output from the second layer. Free all the unnecessary things from the GPU to make space for the next kernel. cudaFree(gpu_C_2); cudaFree(kernel_biases_2); cudaFree(gpu_out_2); free(co_patches_2); ---------------------------------Layer 2 done-------------------------------------------------------- ----------------------------------Layer 3 Maxpooling------------------------------------------------------ Maxpooling layer. 
pool_height = 3; pool_width = 3; input_image_height = output_image_height; input_image_width = output_image_width; input_image_depth = output_image_depth; z_threads = input_image_depth; x_threads = sqrt(max_threads_per_block z_threads); y_threads = x_threads; output_image_height = (input_image_height - input_image_height % pool_height) pool_height; output_image_width = (input_image_width - input_image_width % pool_width) pool_width; output_image_depth = input_image_depth; dim3 blockdim3(x_threads, y_threads, z_threads); dim3 griddim3(output_image_width x_threads, output_image_height y_threads, 1); Copying the previous output into GPU. double gpu_in_3; cudaMalloc((void )&gpu_in_3, input_image_heightinput_image_widthinput_image_depth sizeof(double)); cudaMemcpy(gpu_in_3, layer_2, input_image_heightinput_image_widthinput_image_depth sizeof(double), cudaMemcpyHostToDevice); Creating output array inside GPU. double gpu_out_3; cudaMalloc((void )&gpu_out_3, output_image_heightoutput_image_widthoutput_image_depth sizeof(double)); maxpool_kernel griddim3, blockdim3 (input_image_height, input_image_width, input_image_depth, gpu_in_3, pool_height, pool_width, output_image_height, output_image_width, output_image_depth, gpu_out_3); double layer_3 = (double )malloc(output_image_heightoutput_image_widthoutput_image_depth sizeof(double)); cudaMemcpy(layer_3, gpu_out_3, output_image_heightoutput_image_widthoutput_image_depth sizeof(double), cudaMemcpyDeviceToHost); layer 1 is the output. cudaFree(gpu_in_3); cudaFree(gpu_out_3); ---------------------------------------Layer 3 done-------------------------------------------------- --------------------------------------Layer 4 Flattening---------------------------------------- Flattening in the CPU itself. The idea is to apply the same kind of C major flattening that keras does to the elements coming in from the second pooling layer. 
The array coming in consists of rows of each sheet arranged side by side followed by the rows of the next sheet and so on. Jumbling up that order to stick with keras type flattening which is the C-major ordering consisting of z-axis changing fastest, followed by column and then row changing. int in_h = output_image_height; int in_w = output_image_width; int in_d = output_image_depth; int image_pointer; int channel_pointer; k = 0; double flattened = (double )malloc(in_hin_win_d sizeof(double)); for (image_pointer = 0; image_pointer in_hin_w; image_pointer++) { for (channel_pointer = 0; channel_pointer in_d; channel_pointer++) { flattened[k] = layer_3[image_pointer + channel_pointer in_hin_w]; k++; } } ----------------------------------------Layer 4 done----------------------------------------------- ----------------------------------------Layer 5 Fully connecteddense layer-------------------------- int input_layer_nodes = output_image_height output_image_widthoutput_image_depth; int output_layer_nodes = 1024; This layer has 1024 output nodes. double gpu_in_5; cudaMalloc((void )&gpu_in_5, input_layer_nodes sizeof(double)); cudaMemcpy(gpu_in_5, flattened, input_layer_nodes sizeof(double), cudaMemcpyHostToDevice); double FC_weights_5; cudaMalloc((void )&FC_weights_5, input_layer_nodes output_layer_nodes sizeof(double)); cudaMemcpy(FC_weights_5, DW5_arr, input_layer_nodes output_layer_nodes sizeof(double), cudaMemcpyHostToDevice); double FC_biases_5; cudaMalloc((void )&FC_biases_5, output_layer_nodes sizeof(double)); cudaMemcpy(FC_biases_5, DB5_arr, output_layer_nodes sizeof(double), cudaMemcpyHostToDevice); double gpu_out_5; cudaMalloc((void )&gpu_out_5, output_layer_nodes sizeof(double)); dim3 blocksize5(max_threads_per_block, 1, 1); dim3 gridsize5(output_layer_nodes max_threads_per_block, 1, 1); dense_kernel gridsize5, blocksize5 (input_layer_nodes, output_layer_nodes, gpu_in_5, FC_weights_5, FC_biases_5, gpu_out_5, number_of_classes); layer5 is the output. 
double layer_5 = (double )malloc(output_layer_nodes sizeof(double)); cudaMemcpy(layer_5, gpu_out_5, output_layer_nodes sizeof(double), cudaMemcpyDeviceToHost); cudaFree(gpu_in_5); cudaFree(gpu_out_5); cudaFree(FC_biases_5); cudaFree(FC_weights_5); ---------------------------------------Layer 5 done-------------------------------------------- ------------------------------------Layer 6 Fully connecteddense layer-------------------------- input_layer_nodes = output_layer_nodes; output_layer_nodes = number_of_classes; double gpu_in_6; cudaMalloc((void )&gpu_in_6, input_layer_nodes sizeof(double)); cudaMemcpy(gpu_in_6, layer_5, input_layer_nodes sizeof(double), cudaMemcpyHostToDevice); double FC_weights_6; cudaMalloc((void )&FC_weights_6, input_layer_nodes output_layer_nodes sizeof(double)); cudaMemcpy(FC_weights_6, DW7_arr, input_layer_nodes output_layer_nodes sizeof(double), cudaMemcpyHostToDevice); double FC_biases_6; cudaMalloc((void )&FC_biases_6, output_layer_nodes sizeof(double)); cudaMemcpy(FC_biases_6, DB7_arr, output_layer_nodes sizeof(double), cudaMemcpyHostToDevice); double gpu_out_6; cudaMalloc((void )&gpu_out_6, output_layer_nodes sizeof(double)); dim3 blocksize6(max_threads_per_block, 1, 1); dim3 gridsize6(output_layer_nodes max_threads_per_block, 1, 1); dense_kernel gridsize6, blocksize6 (input_layer_nodes, output_layer_nodes, gpu_in_6, FC_weights_6, FC_biases_6, gpu_out_6,number_of_classes); double layer_7 = (double )malloc(output_layer_nodes sizeof(double)); cudaMemcpy(layer_7, gpu_out_6, output_layer_nodes sizeof(double), cudaMemcpyDeviceToHost); layer7 is the output. cudaFree(gpu_in_6); cudaFree(gpu_out_6); cudaFree(FC_biases_6); cudaFree(FC_weights_6); -----------------------------------Layer 7 done-------------------------------------------------------- Softmax of the output layer. 
int op_layer_size = number_of_classes; int i; double sum = 0.0; for (i = 0; i op_layer_size; i++) { sum += exp(layer_7[i]); } printf(n%fn, exp(3)); double max = layer_7[0] sum; int max_no = 0; for (i = 0; i op_layer_size; i++) { printf(%lfn, layer_7[i]); if ((layer_7[i] sum) max) { max_no = i; } } }
faca60749b618ca96bb3ed19a6d00c19c9c8dfa0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <set> #include <vector> #include <assert.h> #include <rocblas.h> #include <cutil_inline.h> #include <stdlib.h> #include <stdio.h> #include <fstream> #include <iostream> #include <algorithm> #include <typeinfo> #include <nvmatrix.cuh> #include <nvmatrix_operators.cuh> #include <map> using namespace std; /* * Device random number generator pointers. 
*/ //map<int,hiprandGenerator_t> NVMatrix::rndGen; map<int,hiprandState_t*> NVMatrix::rndDevStates; pthread_mutex_t* NVMatrix::_rndMutex = makeMutex(); pthread_mutex_t* NVMatrix::makeMutex() { pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(m, NULL); return m; } void NVMatrix::_init(int numRows, int numCols, int stride, bool isTrans) { _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _ownsData = true; _isTrans = isTrans; _devData = NULL; if (_numElements > 0) { hipblasAlloc(_numElements, sizeof(float), (void**) &_devData); checkCublasError("!!!! device memory allocation error\n"); } _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::NVMatrix() { _init(0, 0, -1, false); } NVMatrix::NVMatrix(bool isTrans) { _init(0, 0, -1, isTrans); } NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) { _init(numRows, numCols, -1, isTrans); } NVMatrix::NVMatrix(const Matrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { copyFromHost(like); } } NVMatrix::NVMatrix(const NVMatrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { like.copy(*this); } } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. */ NVMatrix::NVMatrix(const NVMatrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. */ NVMatrix::NVMatrix(const Matrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, false); } NVMatrix::NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans) : _numRows(numRows), _numCols(numCols), _numElements(numRows*numCols), _ownsData(false), _devData(devData), _isTrans(isTrans) { _stride = stride < 0 ? 
getLeadingDim() : stride; } NVMatrix::~NVMatrix() { if(_ownsData && _numElements > 0) { cublasStatus status = hipblasFree(_devData); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! memory free error\n"); exit(EXIT_FAILURE); } } } void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) { if (resizeDeviceMatrix) { resize(hostMatrix); } copyFromHost(hostMatrix); } void NVMatrix::copyFromHost(const Matrix& hostMatrix) { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); setTrans(hostMatrix.isTrans()); if (getNumElements() > 0) { cublasStatus status = hipblasSetMatrix(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float), hostMatrix.getData(), hostMatrix.getLeadingDim(), _devData, _stride); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device access error (write)\n"); exit( EXIT_FAILURE); } } } void NVMatrix::copyToHost(Matrix& hostMatrix) const { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); hostMatrix.setTrans(_isTrans); if (getNumElements() > 0) { // printf("rows: %d, cols: %d, stride: %d\n", getNumRows(), getNumCols(), getStride()); cublasStatus status = hipblasGetMatrix(getLeadingDim(),getFollowingDim(), sizeof(float), _devData, getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim()); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
device access error (read)\n"); exit( EXIT_FAILURE); } } } void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const { if (resizeTarget) { hostMatrix.resize(_numRows, _numCols); } copyToHost(hostMatrix); } void NVMatrix::copy(NVMatrix& dest) const { dest.resize(*this); copy(dest, 0, -1, 0, -1, 0, 0); } NVMatrix& NVMatrix::copy() const { NVMatrix* c = new NVMatrix(); copy(*c); return *c; } //target = scaleAB * this * b; the target is always of column major void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const { assert(isContiguous() && b.isContiguous() && target.isContiguous()); // assert(&target != &b); assert(_numCols == b.getNumRows()); if(&target != this) { target.resize(_numRows, b.getNumCols()); //target.setTrans(true); // default column major } assert(target.getNumRows() == _numRows); assert(target.getNumCols() == b.getNumCols()); if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer."); } hipblasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols, scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(), 0, target.getDevData(), getNumRows()); target.setTrans(true); checkCublasError("hipblasSgemm failed"); // hipDeviceSynchronize(); } // this = scaleAB * this * b void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) { rightMult(b, scaleAB, *this); } // target = this * b void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const { rightMult(b, 1, target); } /* * This will only work if this matrix is in column-major order! In other words, * if isTrans() returns true. 
But a and b does not have to be */ // this = scaleAB * a * b + scaleThis * this (column-major only), output is also column-major void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) { if (scaleThis == 0) { a.rightMult(b, scaleAB, *this); return; } assert(isContiguous()); assert(a.getNumCols() == b.getNumRows()); assert(this->getNumRows() == a.getNumRows()); assert(this->getNumCols() == b.getNumCols()); assert(_isTrans); // make sure about column-major if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer."); } hipblasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(), scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(), scaleThis, _devData, getLeadingDim()); checkCublasError("hipblasSgemm failed"); // hipDeviceSynchronize(); } // this = a * b + this void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) { addProduct(a, b, 1, 1); } template <class Randomizer> void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && target.isContiguous()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); hipLaunchKernelGGL(( kUnaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kUnaryRandomize: Kernel execution failed"); } template <class Randomizer> void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && data2.isContiguous() && target.isContiguous()); assert(isSameDims(data2)); assert(isTrans() == data2.isTrans()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); hipLaunchKernelGGL(( kBinaryRandomize), 
dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kBinaryRandomize: Kernel execution failed"); } void NVMatrix::initRandom(unsigned long long seed) { assert(!isRndInitialized()); pthread_mutex_lock(_rndMutex); int d = getDeviceID(); rndDevStates[d] = NULL; CUDA_CALL(hipMalloc((void **)&rndDevStates[d], NUM_RND_STREAMS * sizeof(hiprandState_t))); pthread_mutex_unlock(_rndMutex); hipLaunchKernelGGL(( kSetupCurand), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one cutilCheckMsg("initRandom: Kernel execution failed"); } void NVMatrix::initRandom() { NVMatrix::initRandom(time(0)); } hiprandState_t* NVMatrix::getCurandState() { pthread_mutex_lock(_rndMutex); int d = getDeviceID(); assert(rndDevStates.count(d) != 0); hiprandState_t* r = rndDevStates[d]; pthread_mutex_unlock(_rndMutex); return r; } int NVMatrix::getDeviceID() { int d; hipGetDevice(&d); return d; } bool NVMatrix::isRndInitialized() { pthread_mutex_lock(_rndMutex); bool b = rndDevStates.count(getDeviceID()) != 0; pthread_mutex_unlock(_rndMutex); return b; } void NVMatrix::destroyRandom() { assert(isRndInitialized()); int d = getDeviceID(); pthread_mutex_lock(_rndMutex); CUDA_CALL(hipFree(rndDevStates[d])); rndDevStates.erase(d); pthread_mutex_unlock(_rndMutex); } void NVMatrix::binarizeProbs() { binarizeProbs(*this); } void NVMatrix::binarizeProbs(NVMatrix& target) { _unaryRandomize(target, BinarizeUnaryRandomizer()); } void NVMatrix::randomizeUniform() { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, UniformUnaryRandomizer()); } void NVMatrix::randomizeBinary(float prob) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateUniform(rndGen, _devData, 
getNumElements())); _unaryRandomize(*this, BinaryUnaryRandomizer(prob)); } void NVMatrix::randomizeBinaryWider(float scale) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, BinaryWiderUnaryRandomizer(scale)); } void NVMatrix::randomizeTernary() { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, TernaryUnaryRandomizer()); } void NVMatrix::randomizeGaussian() { randomizeGaussian(1); } void NVMatrix::randomizeGaussian(float stdev) { randomizeGaussian(0, stdev); } void NVMatrix::randomizeGaussian(float mean, float stdev) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev)); _unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev)); } /* * Kind of a hack since we don't actually need the contents of this matrix for it, * so we don't really need a binary randomizer. 
*/ void NVMatrix::randomizeGaussian(NVMatrix& stdevs) { _binaryRandomize(stdevs, *this, GaussianBinaryRandomizer()); } // this = this + randn() void NVMatrix::addGaussianNoise() { addGaussianNoise(1); } // this = this + stdev * randn() void NVMatrix::addGaussianNoise(float stdev) { addGaussianNoise(stdev, *this); } // target = this + stdev * randn() void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) { _unaryRandomize(target, AddGaussianUnaryRandomizer(stdev)); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) { addGaussianNoise(stdevs, var, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs) { addGaussianNoise(stdevs, false, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) { if (var) { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>()); } else { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>()); } } // custom void NVMatrix::addUniformNoise(float scale) { addUniformNoise(scale, *this); } void NVMatrix::addUniformNoise(float scale, NVMatrix& target) { _unaryRandomize(target, AddUniformUnaryRandomizer(scale)); } void NVMatrix::addBinaryNoise(float prob) { addBinaryNoise(prob, *this); } void NVMatrix::addBinaryNoise(float prob, NVMatrix& target) { _unaryRandomize(target, AddBinaryUnaryRandomizer(prob)); } // target = this > b void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target); } // this = this > b void NVMatrix::biggerThan(NVMatrix& b) { biggerThan(b, *this); } // target = (this == b) void NVMatrix::equals(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Equals(), b, target); } // this = (this == b) void NVMatrix::equals(NVMatrix& m) { equals(m, *this); } void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target); } void NVMatrix::biggerThanVector(NVMatrix& vec) { biggerThanVector(vec, *this); } void 
NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const { assert(startRow >= 0 && startRow < _numRows); assert(endRow > startRow && endRow <= _numRows); assert(startCol >= 0 && startCol < _numCols); assert(endCol > startCol && endCol <= _numCols); } /* * The only place where stride is supported for now! * Will ALWAYS return a view of the original data, sometimes non-contiguous. */ NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); if (!isTrans()) { return *new NVMatrix(this->_devData + startRow * _stride + startCol, endRow - startRow, endCol - startCol, _stride, false); } return *new NVMatrix(this->_devData + startCol * _stride + startRow, endRow - startRow, endCol - startCol, _stride, true); } /* this will NEVER return a view */ void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); int sliceRows = endRow - startRow, sliceCols = endCol - startCol; if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) { target.resize(sliceRows, sliceCols); } this->copy(target, startRow, endRow, startCol, endCol, 0, 0); } NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const { return slice(startRow, endRow, 0, -1); } void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const { slice(startRow, endRow, 0, -1, target); } NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const { return slice(0, -1, startCol, endCol); } void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const { slice(0, -1, startCol, endCol, target); } /* * Guaranteed to not change the data if the number of elements doesn't change. 
* So you can use this to "reshape" a matrix. */ // data will be lost if the number of elements is changed bool NVMatrix::resize(int numRows, int numCols) { bool reallocated = false; if (numRows != _numRows || numCols != _numCols) { assert(_ownsData); if (_numElements != numRows * numCols) { if (_numElements > 0) { // free old memory cublasStatus status = hipblasFree(_devData); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! memory free error: %X\n", status); exit(EXIT_FAILURE); } } if (numRows * numCols > 0) { // allocate new memory cublasStatus status = hipblasAlloc(numCols * numRows, sizeof(float), (void**) &_devData); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device memory allocation error\n"); exit(EXIT_FAILURE); } } else { _devData = NULL; } reallocated = true; } _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _stride = getLeadingDim(); } return reallocated; } bool NVMatrix::resize(const NVMatrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } bool NVMatrix::resize(const Matrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } void NVMatrix::reshape(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); _numRows = numRows; _numCols = numCols; _stride = getLeadingDim(); } NVMatrix& NVMatrix::reshaped(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); return *new NVMatrix(_devData, numRows, numCols, -1, _isTrans); } void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol) const { srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow; srcEndCol = srcEndCol < 0 ? 
_numCols : srcEndCol; NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol); NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol); srcSlice->apply(NVMatrixOps::Identity(), *destSlice); delete srcSlice; delete destSlice; } NVMatrix& NVMatrix::getTranspose() { return *new NVMatrix(_devData, _numCols, _numRows, _stride, !_isTrans);; } void NVMatrix::transpose(NVMatrix& target) { flipTrans(target); target.setTrans(!target.isTrans()); target.reshape(target.getNumCols(), target.getNumRows()); } void NVMatrix::transpose() { int tmp = _numCols; _numCols = _numRows; _numRows = tmp; _isTrans = !_isTrans; } bool NVMatrix::transpose(bool trans) { bool oldTrans = _isTrans; if (oldTrans != trans) { transpose(); } return oldTrans; } /* * Flips the ordering of the matrix from row-major to column-major and vice versa. * This creates temporary storage -- not a cheap operation. * * This is not equivalent to a "hard transpose". The resultant matrix still has * the same dimensions, its layout in memory just changes. 
*/ NVMatrix& NVMatrix::flipTrans() { NVMatrix* meTrans = new NVMatrix(*this); flipTrans(*meTrans); return *meTrans; } void NVMatrix::flipTrans(NVMatrix& target) { assert(&target != this); target.resize(_numRows, _numCols); target.setTrans(!isTrans()); apply(NVMatrixOps::Identity(), target); } void NVMatrix::squaredDiff(NVMatrix& b) { squaredDiff(b, *this); } void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) { if (scaleA == 0) { b.scale(scaleB, target); return; } if (scaleA == 1 && scaleB == 1) { // slight optimization applyBinary(NVMatrixBinaryOps::Add(), b, target); } else { applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target); } } // target = this + scaleB * b void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) { add(b, 1, scaleB, target); } // target = this + b void NVMatrix::add(NVMatrix& b, NVMatrix& target) { add(b, 1, target); } // this = this + scaleB * b void NVMatrix::add(NVMatrix& b, float scaleB) { add(b, scaleB, *this); } // this = scaleA * this + scaleB * b void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) { add(b, scaleA, scaleB, *this); } // this = this + b void NVMatrix::add(NVMatrix& b) { add(b, 1, *this); } void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) { add(b, -1, target); } void NVMatrix::subtract(NVMatrix& b) { add(b, -1); } void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Multiply(), b, target); } void NVMatrix::eltwiseMult(NVMatrix& b) { eltwiseMult(b, *this); } void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Divide(), b, target); } void NVMatrix::eltwiseDivide(NVMatrix& b) { eltwiseDivide(b, *this); } void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) { assert(isContiguous() && target.isContiguous()); assert(timesX > 0 && timesY > 0); 
target.resize(_numRows*timesY, _numCols*timesX); target.setTrans(_isTrans); if(!isTrans()) { hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numCols, _numRows, target._numCols, target._numRows); } else { hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numRows, _numCols, target._numRows, target._numCols); } cutilCheckMsg("Kernel execution failed"); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::WeightedAdd(1, scaleVec), vec, target); } void NVMatrix::addVector(NVMatrix& vec) { addVector(vec, 1, *this); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec) { addVector(vec, scaleVec, *this); } void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) { addVector(vec, 1, target); } void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target); } void NVMatrix::equalsVector(NVMatrix& vec) { equalsVector(vec, *this); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec) { eltwiseMultByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) { eltwiseDivideByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target); } /* * num threads per block is ignored when summing rows (axis=1) because * it has to be a power of 2. * * TODO: this is a mess, fix it. it works pretty fast but it's too ugly. * TODO: this function is _really_ bad for very long aggregations of few columns. 
*/ template<class Agg, class BinaryOp> void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op) { assert(axis == 0 || axis == 1); assert(isContiguous() && target.isContiguous()); assert(&target != this); int width = _isTrans ? _numRows : _numCols; int height = _isTrans ? _numCols : _numRows; target.setTrans(_isTrans); assert(width > 0); assert(height > 0); if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1); int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK); assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width); assert(numBlocks < NUM_BLOCKS_MAX); hipLaunchKernelGGL(( kDumbAggCols<Agg, BinaryOp>), dim3(numBlocks),dim3(NUM_SUM_COLS_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, width, height, agg, op); cutilCheckMsg("kDumbAggCols: Kernel execution failed"); } else { // row sum target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1); if (width > 1) { if (height >= 16384) { // linear aggregation int numBlocksX = 1; int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y); int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 
16 : AGG_SHORT_ROWS_THREADS_X; int numThreadsY = AGG_SHORT_ROWS_THREADS_Y; while (numBlocksY > NUM_BLOCKS_MAX) { numBlocksY = DIVUP(numBlocksY,2); numBlocksX *= 2; } dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); if(width <= 16) { if(width <= 4) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 4>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 8) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 8>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 12) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 12>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 16>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } } else if(width <= 32) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 48){ hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 64){ hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else { hipLaunchKernelGGL(( kAggShortRows2<Agg, BinaryOp>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } } else { if (width >= 512) { dim3 threads(AWR_NUM_THREADS); dim3 blocks(1, ::min(1024, height)); hipLaunchKernelGGL(( kAggRows_wholerow_nosync), dim3(blocks), dim3(threads), 0, 0, _devData, target._devData, width, height, agg, op); // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, ::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, 
height, agg, op); } else { // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, ::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); NVMatrix *prevSum = this; while (prevSum->getLeadingDim() > 1) { int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512))); int numThreadsY = 1; int numBlocksX = DIVUP(width, 2*numThreadsX); int numBlocksY = ::min(height, NUM_BLOCKS_MAX); NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? &target : new NVMatrix(height, numBlocksX, false); dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); assert(numBlocksX <= NUM_BLOCKS_MAX); assert(numBlocksY <= NUM_BLOCKS_MAX); if(width <= 64) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 32>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 128) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 64>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 256) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 128>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 512) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 256>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 512>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } cutilCheckMsg("agg rows: Kernel execution failed"); hipDeviceSynchronize(); width = numBlocksX; // only true in reduction agg, but for linear agg this doesn't matter anyway if (prevSum != this) { delete prevSum; 
} prevSum = nvSumAccum; } } } } else { copy(target); } } } void NVMatrix::inRangeInc(float lower, float upper) { inRangeInc(lower, upper, *this); } void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<false>(lower, upper), target); } void NVMatrix::inRangeExc(float lower, float upper) { inRangeExc(lower, upper, *this); } void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<true>(lower, upper), target); } void NVMatrix::biggerThanScalar(float scalar) { biggerThanScalar(scalar, *this); } void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::BiggerThanScalar(scalar), target); } void NVMatrix::smallerThanScalar(float scalar) { smallerThanScalar(scalar, *this); } void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::SmallerThanScalar(scalar), target); } void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) { apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target); } void NVMatrix::addScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::AddScalar(scalar), target); } void NVMatrix::addScalar(float scalar) { addScalar(scalar, *this); } void NVMatrix::minWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MinWithScalar(scalar), target); } void NVMatrix::minWithScalar(float scalar) { minWithScalar(scalar, *this); } void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MaxWithScalar(scalar), target); } void NVMatrix::maxWithScalar(float scalar) { maxWithScalar(scalar, *this); } void NVMatrix::pow(float p, NVMatrix& target) { apply(NVMatrixOps::Pow(p), target); } void NVMatrix::pow(float p) { pow(p, *this); } void NVMatrix::scale(float _scale) { scale(_scale, *this); } void NVMatrix::scale(float _scale, NVMatrix& target) { if (_scale != 1 || &target != this) { // optimize away scale by 1 apply(NVMatrixOps::MultByScalar(_scale), target); } } 
template<class Agg, class BinaryOp> NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp op) { NVMatrix *sumVec = new NVMatrix(); _aggregate<Agg, BinaryOp>(axis, *sumVec, agg, op); return *sumVec; } void NVMatrix::max(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) { if (scaleThis != 0) { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum)); } else { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum)); } } void NVMatrix::sum(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } void NVMatrix::min(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::max(int axis) { return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::sum(int axis) { return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::min(int axis) { return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols) { int logn = int(ceil(log(double(n)) / log(2))); *numCols = DIVUP(n, logn); int numThreads = *numCols; *blocks = dim3(DIVUP(numThreads, DP_BLOCKSIZE)); *threads = dim3(DP_BLOCKSIZE); } float NVMatrix::mean() { return sum() / getNumElements(); } float NVMatrix::sum() { return _totalAgg(NVMatrixAggs::Sum()); } float NVMatrix::max() { return _totalAgg(NVMatrixAggs::Max()); } float NVMatrix::min() { return _totalAgg(NVMatrixAggs::Min()); } template<class Agg> float NVMatrix::_totalAgg(Agg agg) { assert(isContiguous()); dim3 blocks, threads; int numCols; // Sum most of it on GPU NVMatrix* src = this; for (NVMatrix* target = NULL; src->getNumElements() > CPUSUM_MAX; src = target) { 
_sum_setParams(src->getNumElements(), &blocks, &threads, &numCols); target = new NVMatrix(1, blocks.x); hipLaunchKernelGGL(( kTotalAgg), dim3(blocks), dim3(threads), 0, 0, src->getDevData(), target->getDevData(), numCols, src->getNumElements(), agg); cutilCheckMsg("kTotalAgg: Kernel execution failed"); hipDeviceSynchronize(); // not really necessary? delete (src == this ? NULL : src); } Matrix srcCPU(src->getNumRows(), src->getNumCols()); src->copyToHost(srcCPU); if (src->getNumElements() > 1) { // Sum remainder on CPU delete (src == this ? NULL : src); if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) { return srcCPU.sum(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) { return srcCPU.max(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) { return srcCPU.min(); } else { assert(false); } } return srcCPU(0,0); } /* * Fast dot product only for matrices with same transposedness. */ float NVMatrix::dotProduct(NVMatrix& b) { assert(isContiguous() && b.isContiguous()); assert(isSameDims(b)); assert(isTrans() == b.isTrans()); // see? dim3 blocks, threads; int numCols; _sum_setParams(getNumElements(), &blocks, &threads, &numCols); NVMatrix target(1, blocks.x); hipLaunchKernelGGL(( kDotProduct_r), dim3(blocks), dim3(threads), 0, 0, getDevData(), b.getDevData(), target.getDevData(), numCols, getNumElements()); cutilCheckMsg("kDotProduct: Kernel execution failed"); hipDeviceSynchronize(); return target.sum(); } float NVMatrix::norm2() { return dotProduct(*this); } float NVMatrix::norm() { return sqrt(norm2()); } void NVMatrix::print(int startRow, int rows, int startCol, int cols) const { hipDeviceSynchronize(); Matrix hm = Matrix(_numRows, _numCols); copyToHost(hm); hm.print(startRow, rows, startCol, cols); } void NVMatrix::print(int rows, int cols) const { print(0, rows, 0, cols); } void NVMatrix::printShape(const char* name) const { printf("%s: %dx%d\n", name, _numRows, _numCols); }
faca60749b618ca96bb3ed19a6d00c19c9c8dfa0.cu
/* * Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <set> #include <vector> #include <assert.h> #include <cublas.h> #include <cutil_inline.h> #include <stdlib.h> #include <stdio.h> #include <fstream> #include <iostream> #include <algorithm> #include <typeinfo> #include <nvmatrix.cuh> #include <nvmatrix_operators.cuh> #include <map> using namespace std; /* * Device random number generator pointers. 
*/ //map<int,curandGenerator_t> NVMatrix::rndGen; map<int,curandState*> NVMatrix::rndDevStates; pthread_mutex_t* NVMatrix::_rndMutex = makeMutex(); pthread_mutex_t* NVMatrix::makeMutex() { pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(m, NULL); return m; } void NVMatrix::_init(int numRows, int numCols, int stride, bool isTrans) { _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _ownsData = true; _isTrans = isTrans; _devData = NULL; if (_numElements > 0) { cublasAlloc(_numElements, sizeof(float), (void**) &_devData); checkCublasError("!!!! device memory allocation error\n"); } _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::NVMatrix() { _init(0, 0, -1, false); } NVMatrix::NVMatrix(bool isTrans) { _init(0, 0, -1, isTrans); } NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) { _init(numRows, numCols, -1, isTrans); } NVMatrix::NVMatrix(const Matrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { copyFromHost(like); } } NVMatrix::NVMatrix(const NVMatrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { like.copy(*this); } } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. */ NVMatrix::NVMatrix(const NVMatrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. */ NVMatrix::NVMatrix(const Matrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, false); } NVMatrix::NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans) : _numRows(numRows), _numCols(numCols), _numElements(numRows*numCols), _ownsData(false), _devData(devData), _isTrans(isTrans) { _stride = stride < 0 ? 
getLeadingDim() : stride; } NVMatrix::~NVMatrix() { if(_ownsData && _numElements > 0) { cublasStatus status = cublasFree(_devData); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! memory free error\n"); exit(EXIT_FAILURE); } } } void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) { if (resizeDeviceMatrix) { resize(hostMatrix); } copyFromHost(hostMatrix); } void NVMatrix::copyFromHost(const Matrix& hostMatrix) { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); setTrans(hostMatrix.isTrans()); if (getNumElements() > 0) { cublasStatus status = cublasSetMatrix(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float), hostMatrix.getData(), hostMatrix.getLeadingDim(), _devData, _stride); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device access error (write)\n"); exit( EXIT_FAILURE); } } } void NVMatrix::copyToHost(Matrix& hostMatrix) const { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); hostMatrix.setTrans(_isTrans); if (getNumElements() > 0) { // printf("rows: %d, cols: %d, stride: %d\n", getNumRows(), getNumCols(), getStride()); cublasStatus status = cublasGetMatrix(getLeadingDim(),getFollowingDim(), sizeof(float), _devData, getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim()); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
device access error (read)\n"); exit( EXIT_FAILURE); } } } void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const { if (resizeTarget) { hostMatrix.resize(_numRows, _numCols); } copyToHost(hostMatrix); } void NVMatrix::copy(NVMatrix& dest) const { dest.resize(*this); copy(dest, 0, -1, 0, -1, 0, 0); } NVMatrix& NVMatrix::copy() const { NVMatrix* c = new NVMatrix(); copy(*c); return *c; } //target = scaleAB * this * b; the target is always of column major void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const { assert(isContiguous() && b.isContiguous() && target.isContiguous()); // assert(&target != &b); assert(_numCols == b.getNumRows()); if(&target != this) { target.resize(_numRows, b.getNumCols()); //target.setTrans(true); // default column major } assert(target.getNumRows() == _numRows); assert(target.getNumCols() == b.getNumCols()); if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer."); } cublasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols, scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(), 0, target.getDevData(), getNumRows()); target.setTrans(true); checkCublasError("cublasSgemm failed"); // cudaThreadSynchronize(); } // this = scaleAB * this * b void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) { rightMult(b, scaleAB, *this); } // target = this * b void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const { rightMult(b, 1, target); } /* * This will only work if this matrix is in column-major order! In other words, * if isTrans() returns true. 
But a and b does not have to be */ // this = scaleAB * a * b + scaleThis * this (column-major only), output is also column-major void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) { if (scaleThis == 0) { a.rightMult(b, scaleAB, *this); return; } assert(isContiguous()); assert(a.getNumCols() == b.getNumRows()); assert(this->getNumRows() == a.getNumRows()); assert(this->getNumCols() == b.getNumCols()); assert(_isTrans); // make sure about column-major if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer."); } cublasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(), scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(), scaleThis, _devData, getLeadingDim()); checkCublasError("cublasSgemm failed"); // cudaThreadSynchronize(); } // this = a * b + this void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) { addProduct(a, b, 1, 1); } template <class Randomizer> void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && target.isContiguous()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); kUnaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kUnaryRandomize: Kernel execution failed"); } template <class Randomizer> void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && data2.isContiguous() && target.isContiguous()); assert(isSameDims(data2)); assert(isTrans() == data2.isTrans()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); kBinaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(getDevData(), 
data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kBinaryRandomize: Kernel execution failed"); } void NVMatrix::initRandom(unsigned long long seed) { assert(!isRndInitialized()); pthread_mutex_lock(_rndMutex); int d = getDeviceID(); rndDevStates[d] = NULL; CUDA_CALL(cudaMalloc((void **)&rndDevStates[d], NUM_RND_STREAMS * sizeof(curandState))); pthread_mutex_unlock(_rndMutex); kSetupCurand<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one cutilCheckMsg("initRandom: Kernel execution failed"); } void NVMatrix::initRandom() { NVMatrix::initRandom(time(0)); } curandState* NVMatrix::getCurandState() { pthread_mutex_lock(_rndMutex); int d = getDeviceID(); assert(rndDevStates.count(d) != 0); curandState* r = rndDevStates[d]; pthread_mutex_unlock(_rndMutex); return r; } int NVMatrix::getDeviceID() { int d; cudaGetDevice(&d); return d; } bool NVMatrix::isRndInitialized() { pthread_mutex_lock(_rndMutex); bool b = rndDevStates.count(getDeviceID()) != 0; pthread_mutex_unlock(_rndMutex); return b; } void NVMatrix::destroyRandom() { assert(isRndInitialized()); int d = getDeviceID(); pthread_mutex_lock(_rndMutex); CUDA_CALL(cudaFree(rndDevStates[d])); rndDevStates.erase(d); pthread_mutex_unlock(_rndMutex); } void NVMatrix::binarizeProbs() { binarizeProbs(*this); } void NVMatrix::binarizeProbs(NVMatrix& target) { _unaryRandomize(target, BinarizeUnaryRandomizer()); } void NVMatrix::randomizeUniform() { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, UniformUnaryRandomizer()); } void NVMatrix::randomizeBinary(float prob) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, BinaryUnaryRandomizer(prob)); } void NVMatrix::randomizeBinaryWider(float 
scale) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, BinaryWiderUnaryRandomizer(scale)); } void NVMatrix::randomizeTernary() { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, TernaryUnaryRandomizer()); } void NVMatrix::randomizeGaussian() { randomizeGaussian(1); } void NVMatrix::randomizeGaussian(float stdev) { randomizeGaussian(0, stdev); } void NVMatrix::randomizeGaussian(float mean, float stdev) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev)); _unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev)); } /* * Kind of a hack since we don't actually need the contents of this matrix for it, * so we don't really need a binary randomizer. */ void NVMatrix::randomizeGaussian(NVMatrix& stdevs) { _binaryRandomize(stdevs, *this, GaussianBinaryRandomizer()); } // this = this + randn() void NVMatrix::addGaussianNoise() { addGaussianNoise(1); } // this = this + stdev * randn() void NVMatrix::addGaussianNoise(float stdev) { addGaussianNoise(stdev, *this); } // target = this + stdev * randn() void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) { _unaryRandomize(target, AddGaussianUnaryRandomizer(stdev)); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) { addGaussianNoise(stdevs, var, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs) { addGaussianNoise(stdevs, false, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) { if (var) { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>()); } else { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>()); } } // custom void NVMatrix::addUniformNoise(float scale) { addUniformNoise(scale, *this); } void NVMatrix::addUniformNoise(float 
scale, NVMatrix& target) { _unaryRandomize(target, AddUniformUnaryRandomizer(scale)); } void NVMatrix::addBinaryNoise(float prob) { addBinaryNoise(prob, *this); } void NVMatrix::addBinaryNoise(float prob, NVMatrix& target) { _unaryRandomize(target, AddBinaryUnaryRandomizer(prob)); } // target = this > b void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target); } // this = this > b void NVMatrix::biggerThan(NVMatrix& b) { biggerThan(b, *this); } // target = (this == b) void NVMatrix::equals(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Equals(), b, target); } // this = (this == b) void NVMatrix::equals(NVMatrix& m) { equals(m, *this); } void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target); } void NVMatrix::biggerThanVector(NVMatrix& vec) { biggerThanVector(vec, *this); } void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const { assert(startRow >= 0 && startRow < _numRows); assert(endRow > startRow && endRow <= _numRows); assert(startCol >= 0 && startCol < _numCols); assert(endCol > startCol && endCol <= _numCols); } /* * The only place where stride is supported for now! * Will ALWAYS return a view of the original data, sometimes non-contiguous. */ NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); if (!isTrans()) { return *new NVMatrix(this->_devData + startRow * _stride + startCol, endRow - startRow, endCol - startCol, _stride, false); } return *new NVMatrix(this->_devData + startCol * _stride + startRow, endRow - startRow, endCol - startCol, _stride, true); } /* this will NEVER return a view */ void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const { endRow = endRow < 0 ? 
this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); int sliceRows = endRow - startRow, sliceCols = endCol - startCol; if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) { target.resize(sliceRows, sliceCols); } this->copy(target, startRow, endRow, startCol, endCol, 0, 0); } NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const { return slice(startRow, endRow, 0, -1); } void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const { slice(startRow, endRow, 0, -1, target); } NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const { return slice(0, -1, startCol, endCol); } void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const { slice(0, -1, startCol, endCol, target); } /* * Guaranteed to not change the data if the number of elements doesn't change. * So you can use this to "reshape" a matrix. */ // data will be lost if the number of elements is changed bool NVMatrix::resize(int numRows, int numCols) { bool reallocated = false; if (numRows != _numRows || numCols != _numCols) { assert(_ownsData); if (_numElements != numRows * numCols) { if (_numElements > 0) { // free old memory cublasStatus status = cublasFree(_devData); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! memory free error: %X\n", status); exit(EXIT_FAILURE); } } if (numRows * numCols > 0) { // allocate new memory cublasStatus status = cublasAlloc(numCols * numRows, sizeof(float), (void**) &_devData); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
device memory allocation error\n"); exit(EXIT_FAILURE); } } else { _devData = NULL; } reallocated = true; } _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _stride = getLeadingDim(); } return reallocated; } bool NVMatrix::resize(const NVMatrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } bool NVMatrix::resize(const Matrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } void NVMatrix::reshape(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); _numRows = numRows; _numCols = numCols; _stride = getLeadingDim(); } NVMatrix& NVMatrix::reshaped(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); return *new NVMatrix(_devData, numRows, numCols, -1, _isTrans); } void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol) const { srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow; srcEndCol = srcEndCol < 0 ? 
_numCols : srcEndCol; NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol); NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol); srcSlice->apply(NVMatrixOps::Identity(), *destSlice); delete srcSlice; delete destSlice; } NVMatrix& NVMatrix::getTranspose() { return *new NVMatrix(_devData, _numCols, _numRows, _stride, !_isTrans);; } void NVMatrix::transpose(NVMatrix& target) { flipTrans(target); target.setTrans(!target.isTrans()); target.reshape(target.getNumCols(), target.getNumRows()); } void NVMatrix::transpose() { int tmp = _numCols; _numCols = _numRows; _numRows = tmp; _isTrans = !_isTrans; } bool NVMatrix::transpose(bool trans) { bool oldTrans = _isTrans; if (oldTrans != trans) { transpose(); } return oldTrans; } /* * Flips the ordering of the matrix from row-major to column-major and vice versa. * This creates temporary storage -- not a cheap operation. * * This is not equivalent to a "hard transpose". The resultant matrix still has * the same dimensions, its layout in memory just changes. 
*/ NVMatrix& NVMatrix::flipTrans() { NVMatrix* meTrans = new NVMatrix(*this); flipTrans(*meTrans); return *meTrans; } void NVMatrix::flipTrans(NVMatrix& target) { assert(&target != this); target.resize(_numRows, _numCols); target.setTrans(!isTrans()); apply(NVMatrixOps::Identity(), target); } void NVMatrix::squaredDiff(NVMatrix& b) { squaredDiff(b, *this); } void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) { if (scaleA == 0) { b.scale(scaleB, target); return; } if (scaleA == 1 && scaleB == 1) { // slight optimization applyBinary(NVMatrixBinaryOps::Add(), b, target); } else { applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target); } } // target = this + scaleB * b void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) { add(b, 1, scaleB, target); } // target = this + b void NVMatrix::add(NVMatrix& b, NVMatrix& target) { add(b, 1, target); } // this = this + scaleB * b void NVMatrix::add(NVMatrix& b, float scaleB) { add(b, scaleB, *this); } // this = scaleA * this + scaleB * b void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) { add(b, scaleA, scaleB, *this); } // this = this + b void NVMatrix::add(NVMatrix& b) { add(b, 1, *this); } void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) { add(b, -1, target); } void NVMatrix::subtract(NVMatrix& b) { add(b, -1); } void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Multiply(), b, target); } void NVMatrix::eltwiseMult(NVMatrix& b) { eltwiseMult(b, *this); } void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Divide(), b, target); } void NVMatrix::eltwiseDivide(NVMatrix& b) { eltwiseDivide(b, *this); } void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) { assert(isContiguous() && target.isContiguous()); assert(timesX > 0 && timesY > 0); 
// NOTE(review): this chunk opens inside a method whose signature lies above the
// visible region (a tile/replicate routine); its tail is reproduced verbatim.
target.resize(_numRows*timesY, _numCols*timesX);
    target.setTrans(_isTrans);
    // Argument order to kTile swaps rows/cols depending on the trans flag so the
    // kernel always sees storage-order (width, height) pairs.
    if(!isTrans()) {
        kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK>>>(_devData, target._devData, _numCols, _numRows, target._numCols, target._numRows);
    } else {
        kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK>>>(_devData, target._devData, _numRows, _numCols, target._numRows, target._numCols);
    }
    cutilCheckMsg("Kernel execution failed");
}

// target = this + scaleVec * vec. vec is combined elementwise via applyBinaryV
// (presumably broadcast as a row/column vector — confirm in applyBinaryV).
void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) {
    applyBinaryV(NVMatrixBinaryOps::WeightedAdd(1, scaleVec), vec, target);
}

// In-place: this += vec.
void NVMatrix::addVector(NVMatrix& vec) {
    addVector(vec, 1, *this);
}

// In-place: this += scaleVec * vec.
void NVMatrix::addVector(NVMatrix& vec, float scaleVec) {
    addVector(vec, scaleVec, *this);
}

// target = this + vec.
void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) {
    addVector(vec, 1, target);
}

// Elementwise equality test against the (broadcast) vector vec, into target.
void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) {
    applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target);
}

// In-place variant of equalsVector.
void NVMatrix::equalsVector(NVMatrix& vec) {
    equalsVector(vec, *this);
}

// target = this * vec (elementwise, vec broadcast via applyBinaryV).
void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) {
    applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target);
}

// In-place variant of eltwiseMultByVector.
void NVMatrix::eltwiseMultByVector(NVMatrix& vec) {
    eltwiseMultByVector(vec, *this);
}

// In-place variant of eltwiseDivideByVector.
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) {
    eltwiseDivideByVector(vec, *this);
}

// target = this / vec (elementwise, vec broadcast via applyBinaryV).
void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) {
    applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target);
}

/*
 * Reduce along one axis of the matrix into target, using accumulator Agg and
 * combining the result into target with BinaryOp.
 *
 * num threads per block is ignored when summing rows (axis=1) because
 * it has to be a power of 2.
 *
 * TODO: this is a mess, fix it. it works pretty fast but it's too ugly.
 * TODO: this function is _really_ bad for very long aggregations of few columns.
 */
template<class Agg, class BinaryOp>
void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op) {
    assert(axis == 0 || axis == 1);
    assert(isContiguous() && target.isContiguous());
    assert(&target != this);
    // width/height are in storage order; with _isTrans the logical axes swap.
    int width = _isTrans ? _numRows : _numCols;
    int height = _isTrans ? _numCols : _numRows;
    target.setTrans(_isTrans);
    assert(width > 0);
    assert(height > 0);
    if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum
        target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1);
        int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK);
        assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width);
        assert(numBlocks < NUM_BLOCKS_MAX);
        kDumbAggCols<Agg, BinaryOp><<<numBlocks,NUM_SUM_COLS_THREADS_PER_BLOCK>>>(_devData, target._devData, width, height, agg, op);
        cutilCheckMsg("kDumbAggCols: Kernel execution failed");
    } else { // row sum
        target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1);
        if (width > 1) {
            if (height >= 16384) { // linear aggregation
                // Short-row path: pick a template width bucket and a 2D grid
                // sized so numBlocksY stays under the device limit.
                int numBlocksX = 1;
                int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y);
                int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X;
                int numThreadsY = AGG_SHORT_ROWS_THREADS_Y;
                while (numBlocksY > NUM_BLOCKS_MAX) {
                    numBlocksY = DIVUP(numBlocksY,2);
                    numBlocksX *= 2;
                }
                dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
                if(width <= 16) {
                    if(width <= 4) {
                        kAggShortRows<Agg, BinaryOp, 1, 4><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
                    } else if(width <= 8) {
                        kAggShortRows<Agg, BinaryOp, 1, 8><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
                    } else if(width <= 12) {
                        kAggShortRows<Agg, BinaryOp, 1, 12><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
                    } else {
                        kAggShortRows<Agg, BinaryOp, 1, 16><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
                    }
                } else if(width <= 32) {
                    kAggShortRows<Agg, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
                } else if(width <= 48){
                    kAggShortRows<Agg, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
                } else if(width <= 64){
                    kAggShortRows<Agg, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
                } else {
                    kAggShortRows2<Agg, BinaryOp><<<grid, threads>>>(_devData, target._devData,width, height, agg, op);
                }
            } else {
                if (width >= 512) {
                    // One block per row, whole row reduced in a single kernel.
                    dim3 threads(AWR_NUM_THREADS);
                    dim3 blocks(1, std::min(1024, height));
                    kAggRows_wholerow_nosync<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op);
//                    dim3 threads(AWR_NUM_THREADS);
//                    dim3 blocks(1, std::min(1024, height));
//                    kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op);
                } else {
//                    dim3 threads(AWR_NUM_THREADS);
//                    dim3 blocks(1, std::min(1024, height));
//                    kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op);
                    // Multi-pass tree reduction: each pass shrinks the row width
                    // to numBlocksX; intermediates are heap-allocated and freed
                    // on the next pass. Terminates when one column remains.
                    NVMatrix *prevSum = this;
                    while (prevSum->getLeadingDim() > 1) {
                        int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512)));
                        int numThreadsY = 1;
                        int numBlocksX = DIVUP(width, 2*numThreadsX);
                        int numBlocksY = std::min(height, NUM_BLOCKS_MAX);
                        // Reuse target directly when its shape already matches this pass's output.
                        NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? &target : new NVMatrix(height, numBlocksX, false);
                        dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY);
                        assert(numBlocksX <= NUM_BLOCKS_MAX);
                        assert(numBlocksY <= NUM_BLOCKS_MAX);
                        if(width <= 64) {
                            kAggRows<Agg, BinaryOp, 32><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op);
                        } else if(width <= 128) {
                            kAggRows<Agg, BinaryOp, 64><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op);
                        } else if(width <= 256) {
                            kAggRows<Agg, BinaryOp, 128><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op);
                        } else if(width <= 512) {
                            kAggRows<Agg, BinaryOp, 256><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op);
                        } else {
                            kAggRows<Agg, BinaryOp, 512><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op);
                        }
                        cutilCheckMsg("agg rows: Kernel execution failed");
                        cudaThreadSynchronize(); // NOTE(review): deprecated API; cudaDeviceSynchronize in modern CUDA
                        width = numBlocksX; // only true in reduction agg, but for linear agg this doesn't matter anyway
                        if (prevSum != this) {
                            delete prevSum;
                        }
                        prevSum = nvSumAccum;
                    }
                }
            }
        } else {
            // Single-column input: the row "aggregate" is just the column itself.
            copy(target);
        }
    }
}

// In-place 0/1 mask: 1 where lower <= x <= upper (inclusive range).
void NVMatrix::inRangeInc(float lower, float upper) {
    inRangeInc(lower, upper, *this);
}

// target = 0/1 mask for lower <= x <= upper.
void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) {
    apply(NVMatrixOps::InRange<false>(lower, upper), target);
}

// In-place 0/1 mask: 1 where lower < x < upper (exclusive range).
void NVMatrix::inRangeExc(float lower, float upper) {
    inRangeExc(lower, upper, *this);
}

// target = 0/1 mask for lower < x < upper.
void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) {
    apply(NVMatrixOps::InRange<true>(lower, upper), target);
}

// In-place threshold mask: x > scalar.
void NVMatrix::biggerThanScalar(float scalar) {
    biggerThanScalar(scalar, *this);
}

// target = threshold mask for x > scalar.
void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::BiggerThanScalar(scalar), target);
}

// In-place threshold mask: x < scalar.
void NVMatrix::smallerThanScalar(float scalar) {
    smallerThanScalar(scalar, *this);
}

// target = threshold mask for x < scalar.
void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::SmallerThanScalar(scalar), target);
}

// target = scaleThis * this + scalar.
void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) {
    apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target);
}

// target = this + scalar.
void NVMatrix::addScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::AddScalar(scalar), target);
}

// In-place: this += scalar.
void NVMatrix::addScalar(float scalar) {
    addScalar(scalar, *this);
}

// target = min(this, scalar) elementwise.
void NVMatrix::minWithScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::MinWithScalar(scalar), target);
}

// In-place elementwise min with scalar.
void NVMatrix::minWithScalar(float scalar) {
    minWithScalar(scalar, *this);
}

// target = max(this, scalar) elementwise.
void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) {
    apply(NVMatrixOps::MaxWithScalar(scalar), target);
}

// In-place elementwise max with scalar.
void NVMatrix::maxWithScalar(float scalar) {
    maxWithScalar(scalar, *this);
}

// target = this ^ p elementwise.
void NVMatrix::pow(float p, NVMatrix& target) {
    apply(NVMatrixOps::Pow(p), target);
}

// In-place elementwise power.
void NVMatrix::pow(float p) {
    pow(p, *this);
}

// In-place scalar multiply.
void NVMatrix::scale(float _scale) {
    scale(_scale, *this);
}

// target = _scale * this; skips the kernel entirely for an in-place scale by 1.
void NVMatrix::scale(float _scale, NVMatrix& target) {
    if (_scale != 1 || &target != this) { // optimize away scale by 1
        apply(NVMatrixOps::MultByScalar(_scale), target);
    }
}

// Allocating variant of _aggregate: returns a new heap-allocated result matrix.
// Caller owns (and must delete) the returned NVMatrix.
template<class Agg, class BinaryOp>
NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp op) {
    NVMatrix *sumVec = new NVMatrix();
    _aggregate<Agg, BinaryOp>(axis, *sumVec, agg, op);
    return *sumVec;
}

// target = max over the given axis.
void NVMatrix::max(int axis, NVMatrix& target) {
    _aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}

// this = scaleThis * this + scaleSum * sum(a, axis); when scaleThis == 0 the
// old contents of this are ignored entirely (SecondScaled).
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) {
    if (scaleThis != 0) {
        a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum));
    } else {
        a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum));
    }
}

// target = sum over the given axis.
void NVMatrix::sum(int axis, NVMatrix& target) {
    _aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second());
}

// target = min over the given axis.
void NVMatrix::min(int axis, NVMatrix& target) {
    _aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}

// Allocating axis-max; caller owns the returned matrix.
NVMatrix& NVMatrix::max(int axis) {
    return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second());
}

// Allocating axis-sum; caller owns the returned matrix.
NVMatrix& NVMatrix::sum(int axis) {
    return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second());
}

// Allocating axis-min; caller owns the returned matrix.
NVMatrix& NVMatrix::min(int axis) {
    return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second());
}

// Computes the launch configuration for the full-matrix reduction kernels:
// splits n elements into roughly n/log2(n) partial results (*numCols), with
// DP_BLOCKSIZE threads per block.
void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols) {
    int logn = int(ceil(log(double(n)) / log(2)));
    *numCols = DIVUP(n, logn);
    int numThreads = *numCols;
    *blocks = dim3(DIVUP(numThreads, DP_BLOCKSIZE));
    *threads = dim3(DP_BLOCKSIZE);
}

// Mean of all elements (sum / count).
float NVMatrix::mean() {
    return sum() / getNumElements();
}

// Sum of all elements.
float NVMatrix::sum() {
    return _totalAgg(NVMatrixAggs::Sum());
}

// Maximum over all elements.
float NVMatrix::max() {
    return _totalAgg(NVMatrixAggs::Max());
}

// Minimum over all elements.
float NVMatrix::min() {
    return _totalAgg(NVMatrixAggs::Min());
}

// Whole-matrix reduction: repeatedly shrinks the data on the GPU until at most
// CPUSUM_MAX elements remain, then finishes the reduction on the host. The
// CPU finishing step dispatches on typeid(Agg), so only Sum/Max/Min are valid.
template<class Agg>
float NVMatrix::_totalAgg(Agg agg) {
    assert(isContiguous());
    dim3 blocks, threads;
    int numCols;
    // Sum most of it on GPU
    NVMatrix* src = this;
    for (NVMatrix* target = NULL; src->getNumElements() > CPUSUM_MAX; src = target) {
        _sum_setParams(src->getNumElements(), &blocks, &threads, &numCols);
        target = new NVMatrix(1, blocks.x);
        kTotalAgg<<<blocks, threads>>>(src->getDevData(), target->getDevData(), numCols, src->getNumElements(), agg);
        cutilCheckMsg("kTotalAgg: Kernel execution failed");
        cudaThreadSynchronize(); // not really necessary?
        delete (src == this ? NULL : src);
    }
    Matrix srcCPU(src->getNumRows(), src->getNumCols());
    src->copyToHost(srcCPU);
    if (src->getNumElements() > 1) { // Sum remainder on CPU
        delete (src == this ? NULL : src);
        if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) {
            return srcCPU.sum();
        } else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) {
            return srcCPU.max();
        } else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) {
            return srcCPU.min();
        } else {
            assert(false);
        }
    }
    return srcCPU(0,0);
}

/*
 * Fast dot product only for matrices with same transposedness.
 */
float NVMatrix::dotProduct(NVMatrix& b) {
    assert(isContiguous() && b.isContiguous());
    assert(isSameDims(b));
    assert(isTrans() == b.isTrans()); // see?
    dim3 blocks, threads;
    int numCols;
    _sum_setParams(getNumElements(), &blocks, &threads, &numCols);
    NVMatrix target(1, blocks.x);
    kDotProduct_r<<<blocks, threads>>>(getDevData(), b.getDevData(), target.getDevData(), numCols, getNumElements());
    cutilCheckMsg("kDotProduct: Kernel execution failed");
    cudaThreadSynchronize();
    return target.sum();
}

// Squared L2 norm = <this, this>.
float NVMatrix::norm2() {
    return dotProduct(*this);
}

// L2 norm.
float NVMatrix::norm() {
    return sqrt(norm2());
}

// Copies the matrix to the host and prints the requested sub-rectangle.
void NVMatrix::print(int startRow, int rows, int startCol, int cols) const {
    cudaThreadSynchronize();
    Matrix hm = Matrix(_numRows, _numCols);
    copyToHost(hm);
    hm.print(startRow, rows, startCol, cols);
}

// Prints the top-left rows x cols corner.
void NVMatrix::print(int rows, int cols) const {
    print(0, rows, 0, cols);
}

// Prints "name: RxC" to stdout.
void NVMatrix::printShape(const char* name) const {
    printf("%s: %dx%d\n", name, _numRows, _numCols);
}
b2802c0ce346ab4276b6151b14ea64b95f496797.hip
// !!! This is a file automatically generated by hipify!!!
// HIP port of the modulated deformable convolution (DCNv2) im2col/col2im
// kernels and their host launchers.
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <float.h>
#include <vector>

#define CUDA_1D_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
         i += blockDim.x * gridDim.x)

// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)

const int CUDA_NUM_THREADS = 1024;

// Ceil-division of N work items into CUDA_NUM_THREADS-sized blocks.
inline int GET_BLOCKS(const int N)
{
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

// Bilinear sample of a single-channel (height x width) image at fractional
// coordinate (h, w); rows are data_width floats apart. Out-of-bounds corners
// contribute 0.
__device__ float dmcn_im2col_bilinear(const float* bottom_data, const int data_width,
                                      const int height, const int width, float h, float w)
{
    int h_low = floor(h);
    int w_low = floor(w);
    int h_high = h_low + 1;
    int w_high = w_low + 1;

    float lh = h - h_low;
    float lw = w - w_low;
    float hh = 1 - lh, hw = 1 - lw;

    float v1 = 0;
    if (h_low >= 0 && w_low >= 0)
        v1 = bottom_data[h_low * data_width + w_low];
    float v2 = 0;
    if (h_low >=0 && w_high <= width - 1)
        v2 = bottom_data[h_low * data_width + w_high];
    float v3 = 0;
    if (h_high <= height - 1 && w_low >= 0)
        v3 = bottom_data[h_high * data_width + w_low];
    float v4 = 0;
    if (h_high <= height - 1 && w_high <= width - 1)
        v4 = bottom_data[h_high * data_width + w_high];

    float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;

    float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
    return val;
}

// Backward weight of integer pixel (h, w) w.r.t. a bilinear sample taken at
// (argmax_h, argmax_w): the bilinear interpolation coefficient for that pixel,
// or 0 when the sample point falls outside the image.
__device__ float dmcn_get_gradient_weight(float argmax_h, float argmax_w,
                                          const int h, const int w, const int height, const int width)
{
    if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
        //empty
        return 0;
    }

    int argmax_h_low = floor(argmax_h);
    int argmax_w_low = floor(argmax_w);
    int argmax_h_high = argmax_h_low + 1;
    int argmax_w_high = argmax_w_low + 1;

    float weight = 0;
    if (h == argmax_h_low && w == argmax_w_low)
        weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
    if (h == argmax_h_low && w == argmax_w_high)
        weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
    if (h == argmax_h_high && w == argmax_w_low)
        weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
    if (h == argmax_h_high && w == argmax_w_high)
        weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
    return weight;
}

// Derivative of the bilinear sample at (argmax_h, argmax_w) with respect to the
// sampling coordinate itself: bp_dir == 0 gives d/dh, bp_dir == 1 gives d/dw.
// Returns 0 when the sample point is outside the image.
__device__ float dmcn_get_coordinate_weight(float argmax_h, float argmax_w,
                                            const int height, const int width, const float* im_data,
                                            const int data_width, const int bp_dir)
{
    if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
    {
        //empty
        return 0;
    }

    int argmax_h_low = floor(argmax_h);
    int argmax_w_low = floor(argmax_w);
    int argmax_h_high = argmax_h_low + 1;
    int argmax_w_high = argmax_w_low + 1;

    float weight = 0;

    if (bp_dir == 0) {
        if (argmax_h_low >= 0 && argmax_w_low >= 0)
            weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
        if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
            weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
        if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
            weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
        if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
            weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
    } else if (bp_dir == 1) {
        if (argmax_h_low >= 0 && argmax_w_low >= 0)
            weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
        if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
            weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
        if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
            weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
        if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
            weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
    }

    return weight;
}

// Modulated (mask-weighted) deformable im2col: for each output location, sample
// the input at the offset-shifted kernel taps via bilinear interpolation,
// multiply by the learned mask, and scatter into the column buffer.
// One thread per (c_im, b, h_col, w_col), iterated with a grid-stride loop.
// NOTE(review): offset/mask pointers are computed as
// (b + deformable_group_index) * ... — this only indexes correctly when there
// is a single deformable group per sample; verify against the caller.
__global__ void modulated_deformable_im2col_gpu_kernel(
    const int n, const float* data_im, const float* data_offset, const float* data_mask,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int channel_per_deformable_group,
    const int batch_size, const int num_channels, const int height_col, const int width_col,
    float* data_col)
{
    CUDA_KERNEL_LOOP(index, n)
    {
        // index index of output matrix
        const int w_col = index % width_col;
        const int h_col = (index / width_col) % height_col;
        const int b_col = (index / width_col / height_col) % batch_size;
        const int c_im = (index / width_col / height_col) / batch_size;
        const int c_col = c_im * kernel_h * kernel_w;

        // compute deformable group index
        const int deformable_group_index = c_im / channel_per_deformable_group;

        const int h_in = h_col * stride_h - pad_h;
        const int w_in = w_col * stride_w - pad_w;

        float* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
        //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
        const float* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
        const float* data_offset_ptr = data_offset + (b_col + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
        const float* data_mask_ptr = data_mask + (b_col + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;

        for (int i = 0; i < kernel_h; ++i) {
            for (int j = 0; j < kernel_w; ++j) {
                const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
                const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
                const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
                const float offset_h = data_offset_ptr[data_offset_h_ptr];
                const float offset_w = data_offset_ptr[data_offset_w_ptr];
                const float mask = data_mask_ptr[data_mask_hw_ptr];
                float val = static_cast<float>(0);
                const float h_im = h_in + i * dilation_h + offset_h;
                const float w_im = w_in + j * dilation_w + offset_w;
                //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
                if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
                    //const float map_h = i * dilation_h + offset_h;
                    //const float map_w = j * dilation_w + offset_w;
                    //const int cur_height = height - h_in;
                    //const int cur_width = width - w_in;
                    //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
                    val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
                }
                *data_col_ptr = val * mask;
                data_col_ptr += batch_size * height_col * width_col;
                //data_col_ptr += height_col * width_col;
            }
        }
    }
}

// data_im   : [batch_size, channels, height, width]
// data_offset : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
// data_mask : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
//   NOTE(review): the mask indexing below uses kernel_h * kernel_w channels per
//   group, so the "2 *" in the mask layout comment looks wrong — verify.
// data_col  : [channels * kernel_h * kernel_w, batch_size, height_col, width_col]
// Backward pass w.r.t. the input image: scatter-adds each column-buffer
// gradient (times mask) into grad_im using the bilinear gradient weights.
// Uses atomicAdd because multiple taps touch the same input pixel.
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
                                                       const float* data_col, const float* data_offset, const float* data_mask,
                                                       const int channels, const int height, const int width,
                                                       const int kernel_h, const int kernel_w,
                                                       const int pad_h, const int pad_w,
                                                       const int stride_h, const int stride_w,
                                                       const int dilation_h, const int dilation_w,
                                                       const int channel_per_deformable_group,
                                                       const int batch_size, const int height_col, const int width_col,
                                                       float* grad_im)
{
    CUDA_KERNEL_LOOP(index, n)
    {
        const int j = (index / width_col / height_col / batch_size) % kernel_w;
        const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
        const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
        // compute the start and end of the output

        const int deformable_group_index = c / channel_per_deformable_group;

        int w_out = index % width_col;
        int h_out = (index / width_col) % height_col;
        int b = (index / width_col / height_col) % batch_size;
        int w_in = w_out * stride_w - pad_w;
        int h_in = h_out * stride_h - pad_h;

        const float* data_offset_ptr = data_offset + (b + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
        const float* data_mask_ptr = data_mask + (b + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
        const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
        const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
        const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
        const float offset_h = data_offset_ptr[data_offset_h_ptr];
        const float offset_w = data_offset_ptr[data_offset_w_ptr];
        const float mask = data_mask_ptr[data_mask_hw_ptr];
        const float cur_inv_h_data = h_in + i * dilation_h + offset_h;
        const float cur_inv_w_data = w_in + j * dilation_w + offset_w;

        const float cur_top_grad = data_col[index] * mask;
        const int cur_h = (int)cur_inv_h_data;
        const int cur_w = (int)cur_inv_w_data;
        // Scatter into the (at most 2x2) pixels with nonzero bilinear weight;
        // the 5x5 window plus the |distance| < 1 test covers negative-coord
        // truncation of the int cast.
        for (int dy = -2; dy <= 2; dy++) {
            for (int dx = -2; dx <= 2; dx++) {
                if (cur_h + dy >= 0 && cur_h + dy < height &&
                    cur_w + dx >= 0 && cur_w + dx < width &&
                    abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
                    abs(cur_inv_w_data - (cur_w + dx)) < 1
                    ) {
                    int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
                    float weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
                    atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
                }
            }
        }
    }
}

// Backward pass w.r.t. the offsets and the modulation mask: one thread per
// offset channel position accumulates, over the channels in its deformable
// group, the coordinate gradient (val -> grad_offset) and the sampled value
// (mval -> grad_mask, written once per (h,w) tap by the even offset channel).
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
                                                             const float* data_col, const float* data_im,
                                                             const float* data_offset, const float* data_mask,
                                                             const int channels, const int height, const int width,
                                                             const int kernel_h, const int kernel_w,
                                                             const int pad_h, const int pad_w,
                                                             const int stride_h, const int stride_w,
                                                             const int dilation_h, const int dilation_w,
                                                             const int channel_per_deformable_group,
                                                             const int batch_size, const int offset_channels, const int height_col, const int width_col,
                                                             float* grad_offset, float* grad_mask)
{
    // data_im : [batch_size, channels, height, width]
    // data_offset : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_mask : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_col : [channels * kernel_h * kernel_w, batch_size, height_col, width_col]
    // kernel : [out_channels, channels, kernel_h, kernel_w]
    CUDA_KERNEL_LOOP(index, n)
    {
        float val = 0, mval = 0;
        int w = index % width_col;
        int h = (index / width_col) % height_col;
        int c = (index / width_col / height_col) % offset_channels;
        int b = (index / width_col / height_col) / offset_channels;
        // compute the start and end of the output

        const int deformable_group_index = c / (2 * kernel_h * kernel_w);
        const int col_step = kernel_h * kernel_w;
        int cnt = 0;
        const float* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
        const float* data_im_ptr = data_im + (b + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
        const float* data_offset_ptr = data_offset + (b + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
        const float* data_mask_ptr = data_mask + (b + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;

        const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;

        for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
            const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
            const int bp_dir = offset_c % 2; // 0 => d/dh, 1 => d/dw

            int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
            int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
            int w_out = col_pos % width_col;
            int h_out = (col_pos / width_col) % height_col;
            int w_in = w_out * stride_w - pad_w;
            int h_in = h_out * stride_h - pad_h;
            const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
            const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
            const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
            const float offset_h = data_offset_ptr[data_offset_h_ptr];
            const float offset_w = data_offset_ptr[data_offset_w_ptr];
            const float mask = data_mask_ptr[data_mask_hw_ptr];
            float inv_h = h_in + i * dilation_h + offset_h;
            float inv_w = w_in + j * dilation_w + offset_w;
            if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
                // Sample fell outside the image: sentinel so the coordinate
                // weight below evaluates to 0.
                inv_h = inv_w = -2;
            } else {
                mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
            }
            const float weight = dmcn_get_coordinate_weight(
                inv_h, inv_w,
                height, width, data_im_ptr + cnt * height * width, width, bp_dir);
            val += weight * data_col_ptr[col_pos] * mask;
            cnt += 1;
        }

        grad_offset[index] = val;
        if (offset_c % 2 == 0)
            grad_mask[(((b + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
    }
}

// Host launcher for the forward im2col step. Derives all sizes from the
// tensors, launches the kernel, checks for launch errors, and returns 1.
// NOTE(review): channel_per_deformable_group is hard-wired to num_channels,
// i.e. a single deformable group; aborts the process on CUDA error.
int ModulatedDeformConvForwardLaucher(
    // data_im : [batch_size, channels, height, width]
    // data_offset : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_mask : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_col : [channels * kernel_h * kernel_w, batch_size, height_col, width_col]
    // kernel : [out_channels, channels * kernel_h * kernel_w]
    // data_out : [out_channels, batch_size, height, width]
    at::Tensor data_im, at::Tensor data_offset, at::Tensor data_mask, at::Tensor data_col,
    int kernel_h, int kernel_w,
    int pad_h, int pad_w,
    int stride_h, int stride_w,
    int dilation_h, int dilation_w)
{
    const auto num_channels = data_im.size(1);
    const auto height = data_im.size(2);
    const auto width = data_im.size(3);
    const auto batch_size = data_im.size(0);
    const auto channel_per_deformable_group = num_channels;
    const auto height_col = data_col.size(2);
    const auto width_col = data_col.size(3);
    const int num_kernels = batch_size * num_channels * width_col * height_col;

    hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
        num_kernels, data_im.data<float>(), data_offset.data<float>(), data_mask.data<float>(),
        height, width, kernel_h, kernel_w,
        pad_h, pad_w, stride_h, stride_w,
        dilation_h, dilation_w, channel_per_deformable_group,
        batch_size, num_channels, height_col, width_col,
        data_col.data<float>());

    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err));
        exit(-1);
    }

    return 1;
}

// Host launcher for the backward pass: runs col2im (input gradient) and
// col2im_coord (offset + mask gradients) back to back, then checks errors.
// Returns 1; aborts the process on CUDA error.
int ModulatedDeformConvBackwardLaucher(
    // data_im : [batch_size, channels, height, width]
    // data_offset : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_mask : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_col : [channels * kernel_h * kernel_w, batch_size, height_col, width_col]
    // kernel : [out_channels, channels, kernel_h, kernel_w]
    at::Tensor data_im, at::Tensor data_offset, at::Tensor data_mask, at::Tensor data_col,
    int kernel_h, int kernel_w,
    int pad_h, int pad_w,
    int stride_h, int stride_w,
    int dilation_h, int dilation_w,
    at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_im)
{
    const auto offset_channels = 2 * kernel_h * kernel_w;
    const auto num_channels = data_im.size(1);
    const auto height = data_im.size(2);
    const auto width = data_im.size(3);
    const auto batch_size = data_im.size(0);
    const auto height_col = data_col.size(2);
    const auto width_col = data_col.size(3);
    const auto col2im_channel_per_deformable_group = num_channels;
    const auto col2im_coord_channel_per_deformable_group = data_col.size(0);
    const int col2im_num_kernels = batch_size * num_channels * kernel_h * kernel_w * width_col * height_col;
    const int col2im_coord_num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w;

    hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(col2im_num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
        col2im_num_kernels, data_col.data<float>(), data_offset.data<float>(), data_mask.data<float>(),
        num_channels, height, width,
        kernel_h, kernel_w, pad_h, pad_w,
        stride_h, stride_w, dilation_h, dilation_w,
        col2im_channel_per_deformable_group, batch_size,
        height_col, width_col, grad_im.data<float>());

    hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(col2im_coord_num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
        col2im_coord_num_kernels, data_col.data<float>(), data_im.data<float>(), data_offset.data<float>(), data_mask.data<float>(),
        num_channels, height, width,
        kernel_h, kernel_w, pad_h, pad_w,
        stride_h, stride_w, dilation_h, dilation_w,
        col2im_coord_channel_per_deformable_group, batch_size,
        offset_channels, height_col, width_col,
        grad_offset.data<float>(), grad_mask.data<float>());

    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err));
        exit(-1);
    }

    return 1;
}
b2802c0ce346ab4276b6151b14ea64b95f496797.cu
// Modulated deformable convolution (DCNv2) im2col/col2im kernels — presumably
// the pre-hipify CUDA original of the preceding record; TODO confirm pairing.
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <float.h>
#include <vector>

#define CUDA_1D_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
         i += blockDim.x * gridDim.x)

// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)

const int CUDA_NUM_THREADS = 1024;

// Ceil-division of N work items into CUDA_NUM_THREADS-sized blocks.
inline int GET_BLOCKS(const int N)
{
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

// Bilinear sample of a single-channel (height x width) image at fractional
// coordinate (h, w); rows are data_width floats apart. Out-of-bounds corners
// contribute 0.
__device__ float dmcn_im2col_bilinear(const float* bottom_data, const int data_width,
                                      const int height, const int width, float h, float w)
{
    int h_low = floor(h);
    int w_low = floor(w);
    int h_high = h_low + 1;
    int w_high = w_low + 1;

    float lh = h - h_low;
    float lw = w - w_low;
    float hh = 1 - lh, hw = 1 - lw;

    float v1 = 0;
    if (h_low >= 0 && w_low >= 0)
        v1 = bottom_data[h_low * data_width + w_low];
    float v2 = 0;
    if (h_low >=0 && w_high <= width - 1)
        v2 = bottom_data[h_low * data_width + w_high];
    float v3 = 0;
    if (h_high <= height - 1 && w_low >= 0)
        v3 = bottom_data[h_high * data_width + w_low];
    float v4 = 0;
    if (h_high <= height - 1 && w_high <= width - 1)
        v4 = bottom_data[h_high * data_width + w_high];

    float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;

    float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
    return val;
}

// Backward weight of integer pixel (h, w) w.r.t. a bilinear sample taken at
// (argmax_h, argmax_w): the bilinear interpolation coefficient for that pixel,
// or 0 when the sample point falls outside the image.
__device__ float dmcn_get_gradient_weight(float argmax_h, float argmax_w,
                                          const int h, const int w, const int height, const int width)
{
    if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) {
        //empty
        return 0;
    }

    int argmax_h_low = floor(argmax_h);
    int argmax_w_low = floor(argmax_w);
    int argmax_h_high = argmax_h_low + 1;
    int argmax_w_high = argmax_w_low + 1;

    float weight = 0;
    if (h == argmax_h_low && w == argmax_w_low)
        weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
    if (h == argmax_h_low && w == argmax_w_high)
        weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
    if (h == argmax_h_high && w == argmax_w_low)
        weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
    if (h == argmax_h_high && w == argmax_w_high)
        weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
    return weight;
}

// Derivative of the bilinear sample at (argmax_h, argmax_w) with respect to the
// sampling coordinate itself: bp_dir == 0 gives d/dh, bp_dir == 1 gives d/dw.
// Returns 0 when the sample point is outside the image.
__device__ float dmcn_get_coordinate_weight(float argmax_h, float argmax_w,
                                            const int height, const int width, const float* im_data,
                                            const int data_width, const int bp_dir)
{
    if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
    {
        //empty
        return 0;
    }

    int argmax_h_low = floor(argmax_h);
    int argmax_w_low = floor(argmax_w);
    int argmax_h_high = argmax_h_low + 1;
    int argmax_w_high = argmax_w_low + 1;

    float weight = 0;

    if (bp_dir == 0) {
        if (argmax_h_low >= 0 && argmax_w_low >= 0)
            weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
        if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
            weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
        if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
            weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
        if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
            weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
    } else if (bp_dir == 1) {
        if (argmax_h_low >= 0 && argmax_w_low >= 0)
            weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
        if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
            weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
        if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
            weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
        if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
            weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
    }

    return weight;
}

// Modulated (mask-weighted) deformable im2col: for each output location, sample
// the input at the offset-shifted kernel taps via bilinear interpolation,
// multiply by the learned mask, and scatter into the column buffer.
// One thread per (c_im, b, h_col, w_col), iterated with a grid-stride loop.
// NOTE(review): offset/mask pointers use (b + deformable_group_index) * ... —
// only correct for a single deformable group per sample; verify at call site.
__global__ void modulated_deformable_im2col_gpu_kernel(
    const int n, const float* data_im, const float* data_offset, const float* data_mask,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int channel_per_deformable_group,
    const int batch_size, const int num_channels, const int height_col, const int width_col,
    float* data_col)
{
    CUDA_KERNEL_LOOP(index, n)
    {
        // index index of output matrix
        const int w_col = index % width_col;
        const int h_col = (index / width_col) % height_col;
        const int b_col = (index / width_col / height_col) % batch_size;
        const int c_im = (index / width_col / height_col) / batch_size;
        const int c_col = c_im * kernel_h * kernel_w;

        // compute deformable group index
        const int deformable_group_index = c_im / channel_per_deformable_group;

        const int h_in = h_col * stride_h - pad_h;
        const int w_in = w_col * stride_w - pad_w;

        float* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
        //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
        const float* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
        const float* data_offset_ptr = data_offset + (b_col + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
        const float* data_mask_ptr = data_mask + (b_col + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;

        for (int i = 0; i < kernel_h; ++i) {
            for (int j = 0; j < kernel_w; ++j) {
                const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
                const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
                const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
                const float offset_h = data_offset_ptr[data_offset_h_ptr];
                const float offset_w = data_offset_ptr[data_offset_w_ptr];
                const float mask = data_mask_ptr[data_mask_hw_ptr];
                float val = static_cast<float>(0);
                const float h_im = h_in + i * dilation_h + offset_h;
                const float w_im = w_in + j * dilation_w + offset_w;
                //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
                if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
                    //const float map_h = i * dilation_h + offset_h;
                    //const float map_w = j * dilation_w + offset_w;
                    //const int cur_height = height - h_in;
                    //const int cur_width = width - w_in;
                    //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
                    val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
                }
                *data_col_ptr = val * mask;
                data_col_ptr += batch_size * height_col * width_col;
                //data_col_ptr += height_col * width_col;
            }
        }
    }
}

// data_im   : [batch_size, channels, height, width]
// data_offset : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
// data_mask : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
//   NOTE(review): mask indexing below uses kernel_h * kernel_w channels per
//   group, so the "2 *" in the mask layout comment looks wrong — verify.
// data_col  : [channels * kernel_h * kernel_w, batch_size, height_col, width_col]
// Backward pass w.r.t. the input image: scatter-adds each column-buffer
// gradient (times mask) into grad_im using the bilinear gradient weights.
// Uses atomicAdd because multiple taps touch the same input pixel.
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
                                                       const float* data_col, const float* data_offset, const float* data_mask,
                                                       const int channels, const int height, const int width,
                                                       const int kernel_h, const int kernel_w,
                                                       const int pad_h, const int pad_w,
                                                       const int stride_h, const int stride_w,
                                                       const int dilation_h, const int dilation_w,
                                                       const int channel_per_deformable_group,
                                                       const int batch_size, const int height_col, const int width_col,
                                                       float* grad_im)
{
    CUDA_KERNEL_LOOP(index, n)
    {
        const int j = (index / width_col / height_col / batch_size) % kernel_w;
        const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
        const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
        // compute the start and end of the output

        const int deformable_group_index = c / channel_per_deformable_group;

        int w_out = index % width_col;
        int h_out = (index / width_col) % height_col;
        int b = (index / width_col / height_col) % batch_size;
        int w_in = w_out * stride_w - pad_w;
        int h_in = h_out * stride_h - pad_h;

        const float* data_offset_ptr = data_offset + (b + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
        const float* data_mask_ptr = data_mask + (b + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
        const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
        const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
        const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
        const float offset_h = data_offset_ptr[data_offset_h_ptr];
        const float offset_w = data_offset_ptr[data_offset_w_ptr];
        const float mask = data_mask_ptr[data_mask_hw_ptr];
        const float cur_inv_h_data = h_in + i * dilation_h + offset_h;
        const float cur_inv_w_data = w_in + j * dilation_w + offset_w;

        const float cur_top_grad = data_col[index] * mask;
        const int cur_h = (int)cur_inv_h_data;
        const int cur_w = (int)cur_inv_w_data;
        // Scatter into the (at most 2x2) pixels with nonzero bilinear weight;
        // the 5x5 window plus the |distance| < 1 test covers negative-coord
        // truncation of the int cast.
        for (int dy = -2; dy <= 2; dy++) {
            for (int dx = -2; dx <= 2; dx++) {
                if (cur_h + dy >= 0 && cur_h + dy < height &&
                    cur_w + dx >= 0 && cur_w + dx < width &&
                    abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
                    abs(cur_inv_w_data - (cur_w + dx)) < 1
                    ) {
                    int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
                    float weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
                    atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
                }
            }
        }
    }
}

// Backward pass w.r.t. the offsets and the modulation mask: one thread per
// offset channel position accumulates, over the channels in its deformable
// group, the coordinate gradient (val -> grad_offset) and the sampled value
// (mval -> grad_mask, written once per (h,w) tap by the even offset channel).
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
                                                             const float* data_col, const float* data_im,
                                                             const float* data_offset, const float* data_mask,
                                                             const int channels, const int height, const int width,
                                                             const int kernel_h, const int kernel_w,
                                                             const int pad_h, const int pad_w,
                                                             const int stride_h, const int stride_w,
                                                             const int dilation_h, const int dilation_w,
                                                             const int channel_per_deformable_group,
                                                             const int batch_size, const int offset_channels, const int height_col, const int width_col,
                                                             float* grad_offset, float* grad_mask)
{
    // data_im : [batch_size, channels, height, width]
    // data_offset : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_mask : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_col : [channels * kernel_h * kernel_w, batch_size, height_col, width_col]
    // kernel : [out_channels, channels, kernel_h, kernel_w]
    CUDA_KERNEL_LOOP(index, n)
    {
        float val = 0, mval = 0;
        int w = index % width_col;
        int h = (index / width_col) % height_col;
        int c = (index / width_col / height_col) % offset_channels;
        int b = (index / width_col / height_col) / offset_channels;
        // compute the start and end of the output

        const int deformable_group_index = c / (2 * kernel_h * kernel_w);
        const int col_step = kernel_h * kernel_w;
        int cnt = 0;
        const float* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
        const float* data_im_ptr = data_im + (b + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
        const float* data_offset_ptr = data_offset + (b + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
        const float* data_mask_ptr = data_mask + (b + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;

        const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;

        for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) {
            const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
            const int bp_dir = offset_c % 2; // 0 => d/dh, 1 => d/dw

            int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
            int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
            int w_out = col_pos % width_col;
            int h_out = (col_pos / width_col) % height_col;
            int w_in = w_out * stride_w - pad_w;
            int h_in = h_out * stride_h - pad_h;
            const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
            const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
            const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
            const float offset_h = data_offset_ptr[data_offset_h_ptr];
            const float offset_w = data_offset_ptr[data_offset_w_ptr];
            const float mask = data_mask_ptr[data_mask_hw_ptr];
            float inv_h = h_in + i * dilation_h + offset_h;
            float inv_w = w_in + j * dilation_w + offset_w;
            if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
                // Sample fell outside the image: sentinel so the coordinate
                // weight below evaluates to 0.
                inv_h = inv_w = -2;
            } else {
                mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
            }
            const float weight = dmcn_get_coordinate_weight(
                inv_h, inv_w,
                height, width, data_im_ptr + cnt * height * width, width, bp_dir);
            val += weight * data_col_ptr[col_pos] * mask;
            cnt += 1;
        }

        grad_offset[index] = val;
        if (offset_c % 2 == 0)
            grad_mask[(((b + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
    }
}

// NOTE(review): the record is truncated here — ModulatedDeformConvForwardLaucher
// is cut off mid-statement at the end of this chunk; remaining tokens kept verbatim.
int ModulatedDeformConvForwardLaucher(
    // data_im : [batch_size, channels, height, width]
    // data_offset : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_mask : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col]
    // data_col : [channels * kernel_h * kernel_w, batch_size, height_col, width_col]
    // kernel : [out_channels, channels * kernel_h * kernel_w]
    // data_out : [out_channels, batch_size, height, width]
    at::Tensor data_im, at::Tensor data_offset, at::Tensor data_mask, at::Tensor data_col,
    int kernel_h, int kernel_w,
    int pad_h, int pad_w,
    int stride_h, int stride_w,
    int dilation_h, int dilation_w)
{
    const auto num_channels = data_im.size(1);
    const auto height = data_im.size(2);
    const auto width = data_im.size(3);
    const auto batch_size = data_im.size(0);
    const auto channel_per_deformable_group = num_channels;
    const auto height_col = data_col.size(2);
    const
auto width_col = data_col.size(3); const int num_kernels = batch_size * num_channels * width_col * height_col; modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_im.data<float>(), data_offset.data<float>(), data_mask.data<float>(), height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, num_channels, height_col, width_col, data_col.data<float>()); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int ModulatedDeformConvBackwardLaucher( // data_im : [batch_size, channels, height, width] // data_offset : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col] // data_mask : [batch_size, 2 * kernel_h * kernel_w, height_col, width_col] // data_col : [channels * kernel_h * kernel_w, batch_size, height_col, width_col] // kernel : [out_channels, channels, kernel_h, kernel_w] at::Tensor data_im, at::Tensor data_offset, at::Tensor data_mask, at::Tensor data_col, int kernel_h, int kernel_w, int pad_h, int pad_w, int stride_h, int stride_w, int dilation_h, int dilation_w, at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_im) { const auto offset_channels = 2 * kernel_h * kernel_w; const auto num_channels = data_im.size(1); const auto height = data_im.size(2); const auto width = data_im.size(3); const auto batch_size = data_im.size(0); const auto height_col = data_col.size(2); const auto width_col = data_col.size(3); const auto col2im_channel_per_deformable_group = num_channels; const auto col2im_coord_channel_per_deformable_group = data_col.size(0); const int col2im_num_kernels = batch_size * num_channels * kernel_h * kernel_w * width_col * height_col; const int col2im_coord_num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w; 
modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(col2im_num_kernels), CUDA_NUM_THREADS>>>( col2im_num_kernels, data_col.data<float>(), data_offset.data<float>(), data_mask.data<float>(), num_channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, col2im_channel_per_deformable_group, batch_size, height_col, width_col, grad_im.data<float>()); modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(col2im_coord_num_kernels), CUDA_NUM_THREADS>>>( col2im_coord_num_kernels, data_col.data<float>(), data_im.data<float>(), data_offset.data<float>(), data_mask.data<float>(), num_channels, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, col2im_coord_channel_per_deformable_group, batch_size, offset_channels, height_col, width_col, grad_offset.data<float>(), grad_mask.data<float>()); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; }
b86cc81f68a35b4eb516091bb551e6f5e769f5b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Written by Vasily Volkov. // Copyright (c) 2008-2009, The Regents of the University of California. // All rights reserved. #include "codelets.h" __global__ void FFT512_device( float2 *work ); #define rank 16 __global__ void FFT16_device_( float2 *work ) { int tid = threadIdx.x; int bid = blockIdx.y * gridDim.x + blockIdx.x; int lo = bid & (8192/rank/64-1); int hi = bid &~(8192/rank/64-1); int i = lo*64 + tid; work += hi * (rank*64) + i; float2 a[rank]; load<rank>( a, work, 512 ); FFT16( a ); twiddle<rank>( a, i, 8192 ); store<rank>( a, work, 512 ); } extern "C" void FFT8192( float2 *work, int batch ) { hipLaunchKernelGGL(( FFT16_device_), dim3(grid2D(batch*(8192/rank)/64)), dim3(64) , 0, 0, work ); hipLaunchKernelGGL(( FFT512_device), dim3(grid2D(batch*rank)), dim3(64) , 0, 0, work ); }
b86cc81f68a35b4eb516091bb551e6f5e769f5b0.cu
// Written by Vasily Volkov. // Copyright (c) 2008-2009, The Regents of the University of California. // All rights reserved. #include "codelets.h" __global__ void FFT512_device( float2 *work ); #define rank 16 __global__ void FFT16_device_( float2 *work ) { int tid = threadIdx.x; int bid = blockIdx.y * gridDim.x + blockIdx.x; int lo = bid & (8192/rank/64-1); int hi = bid &~(8192/rank/64-1); int i = lo*64 + tid; work += hi * (rank*64) + i; float2 a[rank]; load<rank>( a, work, 512 ); FFT16( a ); twiddle<rank>( a, i, 8192 ); store<rank>( a, work, 512 ); } extern "C" void FFT8192( float2 *work, int batch ) { FFT16_device_<<< grid2D(batch*(8192/rank)/64), 64 >>>( work ); FFT512_device<<< grid2D(batch*rank), 64 >>>( work ); }
4edcbd95ad148d6a892e67f47bc284ed85adc76c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel_d_3.h" #include <stdlib.h> #include <stdio.h> #include <helper_cuda.h> #define TPB 3200 __device__ float distance(float x1, float x2){ return sqrt((x2-x1)*(x2-x1)); } __global__ void distanceKernel(float *d_out, float *d_in, float ref){ const int i = blockIdx.x*blockDim.x + threadIdx.x; const float x = d_in[i]; d_out[i] = distance(x, ref); printf("blockIdx:%2d,blockDim:%2d,threadIdx:%2d,i = %2d:dist from %f to %f.\n", blockIdx.x,blockDim.x,threadIdx.x, i, ref, x, d_out[i]); } void distanceArray(float *out, float *in, float ref, int len){ float *d_in = 0; float *d_out = 0; checkCudaErrors( hipMalloc(&d_in, len*sizeof(float)) ); checkCudaErrors( hipMalloc(&d_out, len*sizeof(float)) ); checkCudaErrors( hipMemcpy(d_in, in, len*sizeof(float), hipMemcpyHostToDevice) ); hipLaunchKernelGGL(( distanceKernel), dim3(len/TPB), dim3(TPB), 0, 0, d_out, d_in, ref); checkCudaErrors( hipPeekAtLastError() ); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors( hipMemcpy(out, d_out, len*sizeof(float), hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree(d_in) ); checkCudaErrors( hipFree(d_out) ); }
4edcbd95ad148d6a892e67f47bc284ed85adc76c.cu
#include "kernel_d_3.h" #include <stdlib.h> #include <stdio.h> #include <helper_cuda.h> #define TPB 3200 __device__ float distance(float x1, float x2){ return sqrt((x2-x1)*(x2-x1)); } __global__ void distanceKernel(float *d_out, float *d_in, float ref){ const int i = blockIdx.x*blockDim.x + threadIdx.x; const float x = d_in[i]; d_out[i] = distance(x, ref); printf("blockIdx:%2d,blockDim:%2d,threadIdx:%2d,i = %2d:dist from %f to %f.\n", blockIdx.x,blockDim.x,threadIdx.x, i, ref, x, d_out[i]); } void distanceArray(float *out, float *in, float ref, int len){ float *d_in = 0; float *d_out = 0; checkCudaErrors( cudaMalloc(&d_in, len*sizeof(float)) ); checkCudaErrors( cudaMalloc(&d_out, len*sizeof(float)) ); checkCudaErrors( cudaMemcpy(d_in, in, len*sizeof(float), cudaMemcpyHostToDevice) ); distanceKernel<<<len/TPB, TPB>>>(d_out, d_in, ref); checkCudaErrors( cudaPeekAtLastError() ); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors( cudaMemcpy(out, d_out, len*sizeof(float), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree(d_in) ); checkCudaErrors( cudaFree(d_out) ); }
b0a1e347c7b557997f4832469db057f14afddbe8.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/device_vector.h> #include <thrust/sort.h> #include <hip/hip_runtime.h> #include <cstdlib> // This examples compares sorting performance using Array of Structures (AoS) // and Structure of Arrays (SoA) data layout. Legacy applications will often // store data in C/C++ structs, such as MyStruct defined below. Although // Thrust can process array of structs, it is typically less efficient than // the equivalent structure of arrays layout. In this particular example, // the optimized SoA approach is approximately *five times faster* than the // traditional AoS method. Therefore, it is almost always worthwhile to // convert AoS data structures to SoA. struct MyStruct { int key; float value; __host__ __device__ bool operator<(const MyStruct other) const { return key < other.key; } }; void initialize_keys(thrust::device_vector<int>& keys) { thrust::host_vector<int> h_keys(keys.size()); for(size_t i = 0; i < h_keys.size(); i++) h_keys[i] = rand(); keys = h_keys; } void initialize_keys(thrust::device_vector<MyStruct>& structures) { thrust::host_vector<MyStruct> h_structures(structures.size()); for(size_t i = 0; i < h_structures.size(); i++) h_structures[i].key = rand(); structures = h_structures; } int main(void) { size_t N = 1000000; hipEvent_t start; hipEvent_t end; float elapsed_time, t1, t2; hipEventCreate(&start); hipEventCreate(&end); // Sort Key-Value pairs using Array of Structures (AoS) storage { thrust::device_vector<MyStruct> structures(N); initialize_keys(structures); hipEventRecord(start,0); thrust::sort(structures.begin(), structures.end()); hipEventSynchronize(end); hipEventRecord(end,0); hipEventSynchronize(end); hipEventElapsedTime(&elapsed_time, start, end); std::cout << "AoS sort took " << elapsed_time << " milliseconds" << std::endl; t1 = elapsed_time; } // Sort Key-Value pairs using Structure of Arrays (SoA) storage { thrust::device_vector<int> keys(N); 
thrust::device_vector<float> values(N); initialize_keys(keys); hipEventRecord(start,0); thrust::sort_by_key(keys.begin(), keys.end(), values.begin()); hipDeviceSynchronize(); hipEventRecord(end,0); hipEventSynchronize(end); hipEventElapsedTime(&elapsed_time, start, end); std::cout << "SoA sort took " << elapsed_time << " milliseconds" << std::endl; t2 = elapsed_time; } std::cout << "SoA was " << t1/t2 << " times faster" << std::endl; return 0; }
b0a1e347c7b557997f4832469db057f14afddbe8.cu
#include <thrust/device_vector.h> #include <thrust/sort.h> #include <cuda.h> #include <cstdlib> // This examples compares sorting performance using Array of Structures (AoS) // and Structure of Arrays (SoA) data layout. Legacy applications will often // store data in C/C++ structs, such as MyStruct defined below. Although // Thrust can process array of structs, it is typically less efficient than // the equivalent structure of arrays layout. In this particular example, // the optimized SoA approach is approximately *five times faster* than the // traditional AoS method. Therefore, it is almost always worthwhile to // convert AoS data structures to SoA. struct MyStruct { int key; float value; __host__ __device__ bool operator<(const MyStruct other) const { return key < other.key; } }; void initialize_keys(thrust::device_vector<int>& keys) { thrust::host_vector<int> h_keys(keys.size()); for(size_t i = 0; i < h_keys.size(); i++) h_keys[i] = rand(); keys = h_keys; } void initialize_keys(thrust::device_vector<MyStruct>& structures) { thrust::host_vector<MyStruct> h_structures(structures.size()); for(size_t i = 0; i < h_structures.size(); i++) h_structures[i].key = rand(); structures = h_structures; } int main(void) { size_t N = 1000000; cudaEvent_t start; cudaEvent_t end; float elapsed_time, t1, t2; cudaEventCreate(&start); cudaEventCreate(&end); // Sort Key-Value pairs using Array of Structures (AoS) storage { thrust::device_vector<MyStruct> structures(N); initialize_keys(structures); cudaEventRecord(start,0); thrust::sort(structures.begin(), structures.end()); cudaEventSynchronize(end); cudaEventRecord(end,0); cudaEventSynchronize(end); cudaEventElapsedTime(&elapsed_time, start, end); std::cout << "AoS sort took " << elapsed_time << " milliseconds" << std::endl; t1 = elapsed_time; } // Sort Key-Value pairs using Structure of Arrays (SoA) storage { thrust::device_vector<int> keys(N); thrust::device_vector<float> values(N); initialize_keys(keys); 
cudaEventRecord(start,0); thrust::sort_by_key(keys.begin(), keys.end(), values.begin()); cudaThreadSynchronize(); cudaEventRecord(end,0); cudaEventSynchronize(end); cudaEventElapsedTime(&elapsed_time, start, end); std::cout << "SoA sort took " << elapsed_time << " milliseconds" << std::endl; t2 = elapsed_time; } std::cout << "SoA was " << t1/t2 << " times faster" << std::endl; return 0; }
723331b497038f08a47a24c9bc0e06b71b569d5c.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2016 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cudnn.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> // Reference outputs (calculated on an M40 GPU) // > ./RNN 20 2 512 64 0 // Forward: 1299 GFLOPs // Backward: 2171 GFLOPs, (1564 GFLOPs), (3549 GFLOPs) // i checksum 1.315793E+06 h checksum 1.315212E+05 // di checksum 6.676003E+01 dh checksum 6.425067E+01 // dw checksum 1.453750E+09 // // > ./RNN 20 2 512 64 1 // Forward: 1296 GFLOPs // Backward: 2235 GFLOPs, (1567 GFLOPs), (3896 GFLOPs) // i checksum 6.319591E+05 h checksum 6.319605E+04 // di checksum 4.501830E+00 dh checksum 4.489546E+00 // dw checksum 5.012598E+07 // // > ./RNN 20 2 512 64 2 // Forward: 2635 GFLOPs // Backward: 2757 GFLOPs, (2001 GFLOPs), (4433 GFLOPs) // i checksum 5.749536E+05 c checksum 4.365091E+05 h checksum 5.774818E+04 // di checksum 3.842206E+02 dc checksum 9.323785E+03 dh checksum 1.182566E+01 // dw checksum 4.313461E+08 // // > ./RNN 20 2 512 64 3 // Forward: 2428 GFLOPs // Backward: 2645 GFLOPs, (1915 GFLOPs), (4270 GFLOPs) // i checksum 6.358978E+05 h checksum 6.281680E+04 // di checksum 6.296622E+00 dh checksum 2.289960E+05 // dw checksum 5.397419E+07 // Define some error checking macros. 
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(hipError_t stat, const char *file, int line) { if (stat != hipSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line); } } #define cudnnErrCheck(stat) { cudnnErrCheck_((stat), __FILE__, __LINE__); } void cudnnErrCheck_(cudnnStatus_t stat, const char *file, int line) { if (stat != CUDNN_STATUS_SUCCESS) { fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line); } } __global__ void initGPUData_ker(float *data, int numElements, float value) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numElements) { data[tid] = value; } } void initGPUData(float *data, int numElements, float value) { dim3 gridDim; dim3 blockDim; blockDim.x = 1024; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; hipLaunchKernelGGL(( initGPUData_ker) , dim3(gridDim), dim3(blockDim) , 0, 0, data, numElements, value); } extern "C" float runRNN(int seqLength, int numLayers, int hiddenSize, int inputSize, int miniBatch, float dropout, bool bidirectional, int mode) { // ------------------------- // Create cudnn context // ------------------------- cudnnHandle_t cudnnHandle; // printf("Size of the handle is %d\n", sizeof(cudnnHandle)); cudnnErrCheck(cudnnCreate(&cudnnHandle)); // ------------------------- // Set up inputs and outputs // ------------------------- void *x; //printf("Size of the object is %d\n", sizeof(&x)); //printf("Value of copy is %d\n", hipMemcpyHostToDevice); void *hx = NULL; void *cx = NULL; void *dx; void *dhx = NULL; void *dcx = NULL; void *y; void *hy = NULL; void *cy = NULL; void *dy; void *dhy = NULL; void *dcy = NULL; // Memory allocation. hx, cx, dhx, dcx, hy, cy, dhy and dcy can be NULL. cudaErrCheck(hipMalloc((void**)&x, seqLength * inputSize * miniBatch * sizeof(float))); cudaErrCheck(hipMalloc((void**)&hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 
2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dx, seqLength * inputSize * miniBatch * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dhx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dcx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); int ysize = seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1); float y_train[ysize]; float y_test[ysize]; for(int j = 0; j < ysize; j++){ y_train[j] = (j % 10) + 20.5; } cudaErrCheck(hipMalloc((void**)&y, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&hy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&cy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(hipMalloc((void**)&dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); // Set up tensor descriptors. x/y/dx/dy are arrays, one per time step. 
cudnnTensorDescriptor_t *xDesc, *yDesc, *dxDesc, *dyDesc; cudnnTensorDescriptor_t hxDesc, cxDesc; cudnnTensorDescriptor_t hyDesc, cyDesc; cudnnTensorDescriptor_t dhxDesc, dcxDesc; cudnnTensorDescriptor_t dhyDesc, dcyDesc; xDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); yDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); dxDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); dyDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); int dimA[3]; int strideA[3]; // In this example dimA[1] is constant across the whole sequence // This isn't required, all that is required is that it does not increase. for (int i = 0; i < seqLength; i++) { cudnnErrCheck(cudnnCreateTensorDescriptor(&xDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&yDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&dxDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&dyDesc[i])); dimA[0] = miniBatch; dimA[1] = inputSize; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnSetTensorNdDescriptor(xDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dxDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); dimA[0] = miniBatch; dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnSetTensorNdDescriptor(yDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dyDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); } dimA[0] = numLayers * (bidirectional ? 
2 : 1); dimA[1] = miniBatch; dimA[2] = hiddenSize; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnCreateTensorDescriptor(&hxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&cxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&hyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&cyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dhxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dcxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dhyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dcyDesc)); cudnnErrCheck(cudnnSetTensorNdDescriptor(hxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(cxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(hyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(cyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dhxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dcxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dhyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dcyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); // ------------------------- // Set up the dropout descriptor (needed for the RNN descriptor) // ------------------------- unsigned long long seed = 1337ull; // Pick a seed. cudnnDropoutDescriptor_t dropoutDesc; cudnnErrCheck(cudnnCreateDropoutDescriptor(&dropoutDesc)); // How much memory does dropout need for states? 
// These states are used to generate random numbers internally // and should not be freed until the RNN descriptor is no longer used size_t stateSize; void *states; cudnnErrCheck(cudnnDropoutGetStatesSize(cudnnHandle, &stateSize)); cudaErrCheck(hipMalloc(&states, stateSize)); cudnnErrCheck(cudnnSetDropoutDescriptor(dropoutDesc, cudnnHandle, dropout, states, stateSize, seed)); // ------------------------- // Set up the RNN descriptor // ------------------------- cudnnRNNDescriptor_t rnnDesc; miopenRNNMode_t RNNMode; cudnnErrCheck(cudnnCreateRNNDescriptor(&rnnDesc)); if (mode == 0) RNNMode = miopenRNNRELU; else if (mode == 1) RNNMode = miopenRNNTANH; else if (mode == 2) RNNMode = miopenLSTM; else if (mode == 3) RNNMode = miopenGRU; cudnnErrCheck(cudnnSetRNNDescriptor(rnnDesc, hiddenSize, numLayers, dropoutDesc, CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation bidirectional ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, RNNMode, CUDNN_DATA_FLOAT)); // ------------------------- // Set up parameters // ------------------------- // This needs to be done after the rnn descriptor is set as otherwise // we don't know how many parameters we have to allocate void *w; void *dw; cudnnFilterDescriptor_t wDesc, dwDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&wDesc)); cudnnErrCheck(cudnnCreateFilterDescriptor(&dwDesc)); size_t weightsSize; cudnnErrCheck(cudnnGetRNNParamsSize(cudnnHandle, rnnDesc, xDesc[0], &weightsSize, CUDNN_DATA_FLOAT)); int dimW[3]; dimW[0] = weightsSize / sizeof(float); dimW[1] = 1; dimW[2] = 1; cudnnErrCheck(cudnnSetFilterNdDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); cudnnErrCheck(cudnnSetFilterNdDescriptor(dwDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); cudaErrCheck(hipMalloc((void**)&w, weightsSize)); cudaErrCheck(hipMalloc((void**)&dw, weightsSize)); // ------------------------- // Set up work space and reserved memory // ------------------------- void *workspace; void *reserveSpace; size_t workSize; 
size_t reserveSize; // Need for every pass cudnnErrCheck(cudnnGetRNNWorkspaceSize(cudnnHandle, rnnDesc, seqLength, xDesc, &workSize)); // Only needed in training, shouldn't be touched between passes. cudnnErrCheck(cudnnGetRNNTrainingReserveSize(cudnnHandle, rnnDesc, seqLength, xDesc, &reserveSize)); cudaErrCheck(hipMalloc((void**)&workspace, workSize)); cudaErrCheck(hipMalloc((void**)&reserveSpace, reserveSize)); // ********************************************************************************************************* // Initialise weights and inputs // ********************************************************************************************************* // We initialise to something simple. // Matrices are initialised to 1 / matrixSize, biases to 1, data is 1. initGPUData((float*)x, seqLength * inputSize * miniBatch, 1.f); if (hx != NULL) initGPUData((float*)hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (cx != NULL) initGPUData((float*)cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); initGPUData((float*)dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (dhy != NULL) initGPUData((float*)dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (dcy != NULL) initGPUData((float*)dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); // Weights int numLinearLayers = 0; if (RNNMode == miopenRNNRELU || RNNMode == miopenRNNTANH) { numLinearLayers = 2; } else if (RNNMode == miopenLSTM) { numLinearLayers = 8; } else if (RNNMode == miopenGRU) { numLinearLayers = 6; } for (int layer = 0; layer < numLayers * (bidirectional ? 
2 : 1); layer++) { for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) { cudnnFilterDescriptor_t linLayerMatDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerMatDesc)); float *linLayerMat; cudnnErrCheck(cudnnGetRNNLinLayerMatrixParams( cudnnHandle, rnnDesc, layer, xDesc[0], wDesc, w, linLayerID, linLayerMatDesc, (void**)&linLayerMat)); cudnnDataType_t dataType; cudnnTensorFormat_t format; int nbDims; int filterDimA[3]; cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerMatDesc, 3, &dataType, &format, &nbDims, filterDimA)); initGPUData(linLayerMat, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f / (float)(filterDimA[0] * filterDimA[1] * filterDimA[2])); cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerMatDesc)); cudnnFilterDescriptor_t linLayerBiasDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerBiasDesc)); float *linLayerBias; cudnnErrCheck(cudnnGetRNNLinLayerBiasParams( cudnnHandle, rnnDesc, layer, xDesc[0], wDesc, w, linLayerID, linLayerBiasDesc, (void**)&linLayerBias)); cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, &dataType, &format, &nbDims, filterDimA)); initGPUData(linLayerBias, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f); cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerBiasDesc)); } } // ********************************************************************************************************* // At this point all of the setup is done. We now need to pass through the RNN. 
// ********************************************************************************************************* // int alpha_param[1]; // alpha_param[0] = -0.05; // int beta_param[1]; // beta_param[0] = 1.0; float rval; cudaErrCheck(hipDeviceSynchronize()); for(int i = 0; i < 300; i++){ hipEvent_t start, stop; float timeForward, timeBackward1, timeBackward2; cudaErrCheck(hipEventCreate(&start)); cudaErrCheck(hipEventCreate(&stop)); cudaErrCheck(hipEventRecord(start)); // If we're not training we use this instead // cudnnErrCheck(cudnnRNNForwardInference(cudnnHandle, // rnnDesc, // xDesc, // x, // hxDesc, // hx, // cxDesc, // cx, // wDesc, // w, // yDesc, // y, // hyDesc, // hy, // cyDesc, // cy, // workspace, // workSize)); cudnnErrCheck(cudnnRNNForwardTraining(cudnnHandle, rnnDesc, seqLength, xDesc, x, hxDesc, hx, cxDesc, cx, wDesc, w, yDesc, y, hyDesc, hy, cyDesc, cy, workspace, workSize, reserveSpace, reserveSize)); cudaErrCheck(hipEventRecord(stop)); cudaErrCheck(hipEventSynchronize(stop)); cudaErrCheck(hipEventElapsedTime(&timeForward, start, stop)); cudaErrCheck(hipEventRecord(start)); //Compute a dy cudaErrCheck(hipMemcpy(y_test, y, ysize, hipMemcpyDeviceToHost)); for(int j = 0; j < ysize; j++){ y_test[j] -= y_train[j]; } cudaErrCheck(hipMemcpy(dy, y_test, ysize, hipMemcpyHostToDevice)); //cudaErrCheck(hipMemcpy(y, y_train, ysize, hipMemcpyHostToDevice)); cudnnErrCheck(cudnnRNNBackwardData(cudnnHandle, rnnDesc, seqLength, yDesc, y, dyDesc, dy, dhyDesc, dhy, dcyDesc, dcy, wDesc, w, hxDesc, hx, cxDesc, cx, dxDesc, dx, dhxDesc, dhx, dcxDesc, dcx, workspace, workSize, reserveSpace, reserveSize )); cudaErrCheck(hipEventRecord(stop)); cudaErrCheck(hipEventSynchronize(stop)); cudaErrCheck(hipEventElapsedTime(&timeBackward1, start, stop)); cudaErrCheck(hipEventRecord(start)); // cudnnRNNBackwardWeights adds to the data in dw. 
cudaErrCheck(hipMemset(dw, 0, weightsSize)); cudnnErrCheck(cudnnRNNBackwardWeights( cudnnHandle, rnnDesc, seqLength, xDesc, x, hxDesc, hx, yDesc, y, workspace, workSize, dwDesc, dw, reserveSpace, reserveSize )); cudaErrCheck(hipEventSynchronize(stop)); cudaErrCheck(hipEventRecord(stop)); cudaErrCheck(hipEventSynchronize(stop)); cudaErrCheck(hipEventElapsedTime(&timeBackward2, start, stop)); if (true) { float* testOutputdw; float testOutputw[weightsSize]; float* testY; testOutputdw = (float*)malloc(weightsSize); testY = (float*)malloc(ysize); cudaErrCheck(hipMemcpy(testY, y, ysize, hipMemcpyDeviceToHost)); cudaErrCheck(hipMemcpy(testOutputdw, dw, weightsSize, hipMemcpyDeviceToHost)); cudaErrCheck(hipMemcpy(testOutputw, w, weightsSize, hipMemcpyDeviceToHost)); free(testOutputdw); free(testY); } printf("Epoch %d \n",i); int numMats = 0; if (RNNMode == miopenRNNRELU || RNNMode == miopenRNNTANH) { numMats = 2; } else if (RNNMode == miopenLSTM) { numMats = 8; } else if (RNNMode == miopenGRU) { numMats = 6; } printf("Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward)); rval = numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward); } // Calculate FLOPS //sprintf(buffer, "Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward)); //hipDeviceSynchronize(); // ********************************************************************************************************* // Print checksums. // ********************************************************************************************************* // Can these be passed back and saved? 
hipFree(x); hipFree(hx); hipFree(cx); hipFree(y); hipFree(hy); hipFree(cy); hipFree(dx); hipFree(dhx); hipFree(dcx); hipFree(dy); hipFree(dhy); hipFree(dcy); hipFree(workspace); hipFree(reserveSpace); hipFree(w); hipFree(dw); cudnnDestroy(cudnnHandle); return rval; } //extern "C" int cudamain() { return 0; }
723331b497038f08a47a24c9bc0e06b71b569d5c.cu
/** * Copyright 2016 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <cudnn.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <time.h> // Reference outputs (calculated on an M40 GPU) // > ./RNN 20 2 512 64 0 // Forward: 1299 GFLOPs // Backward: 2171 GFLOPs, (1564 GFLOPs), (3549 GFLOPs) // i checksum 1.315793E+06 h checksum 1.315212E+05 // di checksum 6.676003E+01 dh checksum 6.425067E+01 // dw checksum 1.453750E+09 // // > ./RNN 20 2 512 64 1 // Forward: 1296 GFLOPs // Backward: 2235 GFLOPs, (1567 GFLOPs), (3896 GFLOPs) // i checksum 6.319591E+05 h checksum 6.319605E+04 // di checksum 4.501830E+00 dh checksum 4.489546E+00 // dw checksum 5.012598E+07 // // > ./RNN 20 2 512 64 2 // Forward: 2635 GFLOPs // Backward: 2757 GFLOPs, (2001 GFLOPs), (4433 GFLOPs) // i checksum 5.749536E+05 c checksum 4.365091E+05 h checksum 5.774818E+04 // di checksum 3.842206E+02 dc checksum 9.323785E+03 dh checksum 1.182566E+01 // dw checksum 4.313461E+08 // // > ./RNN 20 2 512 64 3 // Forward: 2428 GFLOPs // Backward: 2645 GFLOPs, (1915 GFLOPs), (4270 GFLOPs) // i checksum 6.358978E+05 h checksum 6.281680E+04 // di checksum 6.296622E+00 dh checksum 2.289960E+05 // dw checksum 5.397419E+07 // Define some error checking macros. 
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cudnnErrCheck(stat) { cudnnErrCheck_((stat), __FILE__, __LINE__); } void cudnnErrCheck_(cudnnStatus_t stat, const char *file, int line) { if (stat != CUDNN_STATUS_SUCCESS) { fprintf(stderr, "cuDNN Error: %s %s %d\n", cudnnGetErrorString(stat), file, line); } } __global__ void initGPUData_ker(float *data, int numElements, float value) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < numElements) { data[tid] = value; } } void initGPUData(float *data, int numElements, float value) { dim3 gridDim; dim3 blockDim; blockDim.x = 1024; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; initGPUData_ker <<< gridDim, blockDim >>> (data, numElements, value); } extern "C" float runRNN(int seqLength, int numLayers, int hiddenSize, int inputSize, int miniBatch, float dropout, bool bidirectional, int mode) { // ------------------------- // Create cudnn context // ------------------------- cudnnHandle_t cudnnHandle; // printf("Size of the handle is %d\n", sizeof(cudnnHandle)); cudnnErrCheck(cudnnCreate(&cudnnHandle)); // ------------------------- // Set up inputs and outputs // ------------------------- void *x; //printf("Size of the object is %d\n", sizeof(&x)); //printf("Value of copy is %d\n", cudaMemcpyHostToDevice); void *hx = NULL; void *cx = NULL; void *dx; void *dhx = NULL; void *dcx = NULL; void *y; void *hy = NULL; void *cy = NULL; void *dy; void *dhy = NULL; void *dcy = NULL; // Memory allocation. hx, cx, dhx, dcx, hy, cy, dhy and dcy can be NULL. cudaErrCheck(cudaMalloc((void**)&x, seqLength * inputSize * miniBatch * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 
2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dx, seqLength * inputSize * miniBatch * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dhx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dcx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); int ysize = seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1); float y_train[ysize]; float y_test[ysize]; for(int j = 0; j < ysize; j++){ y_train[j] = (j % 10) + 20.5; } cudaErrCheck(cudaMalloc((void**)&y, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&hy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&cy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1) * sizeof(float))); // Set up tensor descriptors. x/y/dx/dy are arrays, one per time step. 
cudnnTensorDescriptor_t *xDesc, *yDesc, *dxDesc, *dyDesc; cudnnTensorDescriptor_t hxDesc, cxDesc; cudnnTensorDescriptor_t hyDesc, cyDesc; cudnnTensorDescriptor_t dhxDesc, dcxDesc; cudnnTensorDescriptor_t dhyDesc, dcyDesc; xDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); yDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); dxDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); dyDesc = (cudnnTensorDescriptor_t*)malloc(seqLength * sizeof(cudnnTensorDescriptor_t)); int dimA[3]; int strideA[3]; // In this example dimA[1] is constant across the whole sequence // This isn't required, all that is required is that it does not increase. for (int i = 0; i < seqLength; i++) { cudnnErrCheck(cudnnCreateTensorDescriptor(&xDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&yDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&dxDesc[i])); cudnnErrCheck(cudnnCreateTensorDescriptor(&dyDesc[i])); dimA[0] = miniBatch; dimA[1] = inputSize; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnSetTensorNdDescriptor(xDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dxDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); dimA[0] = miniBatch; dimA[1] = bidirectional ? hiddenSize * 2 : hiddenSize; dimA[2] = 1; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnSetTensorNdDescriptor(yDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dyDesc[i], CUDNN_DATA_FLOAT, 3, dimA, strideA)); } dimA[0] = numLayers * (bidirectional ? 
2 : 1); dimA[1] = miniBatch; dimA[2] = hiddenSize; strideA[0] = dimA[2] * dimA[1]; strideA[1] = dimA[2]; strideA[2] = 1; cudnnErrCheck(cudnnCreateTensorDescriptor(&hxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&cxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&hyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&cyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dhxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dcxDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dhyDesc)); cudnnErrCheck(cudnnCreateTensorDescriptor(&dcyDesc)); cudnnErrCheck(cudnnSetTensorNdDescriptor(hxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(cxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(hyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(cyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dhxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dcxDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dhyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); cudnnErrCheck(cudnnSetTensorNdDescriptor(dcyDesc, CUDNN_DATA_FLOAT, 3, dimA, strideA)); // ------------------------- // Set up the dropout descriptor (needed for the RNN descriptor) // ------------------------- unsigned long long seed = 1337ull; // Pick a seed. cudnnDropoutDescriptor_t dropoutDesc; cudnnErrCheck(cudnnCreateDropoutDescriptor(&dropoutDesc)); // How much memory does dropout need for states? 
// These states are used to generate random numbers internally // and should not be freed until the RNN descriptor is no longer used size_t stateSize; void *states; cudnnErrCheck(cudnnDropoutGetStatesSize(cudnnHandle, &stateSize)); cudaErrCheck(cudaMalloc(&states, stateSize)); cudnnErrCheck(cudnnSetDropoutDescriptor(dropoutDesc, cudnnHandle, dropout, states, stateSize, seed)); // ------------------------- // Set up the RNN descriptor // ------------------------- cudnnRNNDescriptor_t rnnDesc; cudnnRNNMode_t RNNMode; cudnnErrCheck(cudnnCreateRNNDescriptor(&rnnDesc)); if (mode == 0) RNNMode = CUDNN_RNN_RELU; else if (mode == 1) RNNMode = CUDNN_RNN_TANH; else if (mode == 2) RNNMode = CUDNN_LSTM; else if (mode == 3) RNNMode = CUDNN_GRU; cudnnErrCheck(cudnnSetRNNDescriptor(rnnDesc, hiddenSize, numLayers, dropoutDesc, CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation bidirectional ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, RNNMode, CUDNN_DATA_FLOAT)); // ------------------------- // Set up parameters // ------------------------- // This needs to be done after the rnn descriptor is set as otherwise // we don't know how many parameters we have to allocate void *w; void *dw; cudnnFilterDescriptor_t wDesc, dwDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&wDesc)); cudnnErrCheck(cudnnCreateFilterDescriptor(&dwDesc)); size_t weightsSize; cudnnErrCheck(cudnnGetRNNParamsSize(cudnnHandle, rnnDesc, xDesc[0], &weightsSize, CUDNN_DATA_FLOAT)); int dimW[3]; dimW[0] = weightsSize / sizeof(float); dimW[1] = 1; dimW[2] = 1; cudnnErrCheck(cudnnSetFilterNdDescriptor(wDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); cudnnErrCheck(cudnnSetFilterNdDescriptor(dwDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dimW)); cudaErrCheck(cudaMalloc((void**)&w, weightsSize)); cudaErrCheck(cudaMalloc((void**)&dw, weightsSize)); // ------------------------- // Set up work space and reserved memory // ------------------------- void *workspace; void *reserveSpace; size_t 
workSize; size_t reserveSize; // Need for every pass cudnnErrCheck(cudnnGetRNNWorkspaceSize(cudnnHandle, rnnDesc, seqLength, xDesc, &workSize)); // Only needed in training, shouldn't be touched between passes. cudnnErrCheck(cudnnGetRNNTrainingReserveSize(cudnnHandle, rnnDesc, seqLength, xDesc, &reserveSize)); cudaErrCheck(cudaMalloc((void**)&workspace, workSize)); cudaErrCheck(cudaMalloc((void**)&reserveSpace, reserveSize)); // ********************************************************************************************************* // Initialise weights and inputs // ********************************************************************************************************* // We initialise to something simple. // Matrices are initialised to 1 / matrixSize, biases to 1, data is 1. initGPUData((float*)x, seqLength * inputSize * miniBatch, 1.f); if (hx != NULL) initGPUData((float*)hx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (cx != NULL) initGPUData((float*)cx, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); initGPUData((float*)dy, seqLength * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (dhy != NULL) initGPUData((float*)dhy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); if (dcy != NULL) initGPUData((float*)dcy, numLayers * hiddenSize * miniBatch * (bidirectional ? 2 : 1), 1.f); // Weights int numLinearLayers = 0; if (RNNMode == CUDNN_RNN_RELU || RNNMode == CUDNN_RNN_TANH) { numLinearLayers = 2; } else if (RNNMode == CUDNN_LSTM) { numLinearLayers = 8; } else if (RNNMode == CUDNN_GRU) { numLinearLayers = 6; } for (int layer = 0; layer < numLayers * (bidirectional ? 
2 : 1); layer++) { for (int linLayerID = 0; linLayerID < numLinearLayers; linLayerID++) { cudnnFilterDescriptor_t linLayerMatDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerMatDesc)); float *linLayerMat; cudnnErrCheck(cudnnGetRNNLinLayerMatrixParams( cudnnHandle, rnnDesc, layer, xDesc[0], wDesc, w, linLayerID, linLayerMatDesc, (void**)&linLayerMat)); cudnnDataType_t dataType; cudnnTensorFormat_t format; int nbDims; int filterDimA[3]; cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerMatDesc, 3, &dataType, &format, &nbDims, filterDimA)); initGPUData(linLayerMat, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f / (float)(filterDimA[0] * filterDimA[1] * filterDimA[2])); cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerMatDesc)); cudnnFilterDescriptor_t linLayerBiasDesc; cudnnErrCheck(cudnnCreateFilterDescriptor(&linLayerBiasDesc)); float *linLayerBias; cudnnErrCheck(cudnnGetRNNLinLayerBiasParams( cudnnHandle, rnnDesc, layer, xDesc[0], wDesc, w, linLayerID, linLayerBiasDesc, (void**)&linLayerBias)); cudnnErrCheck(cudnnGetFilterNdDescriptor(linLayerBiasDesc, 3, &dataType, &format, &nbDims, filterDimA)); initGPUData(linLayerBias, filterDimA[0] * filterDimA[1] * filterDimA[2], 1.f); cudnnErrCheck(cudnnDestroyFilterDescriptor(linLayerBiasDesc)); } } // ********************************************************************************************************* // At this point all of the setup is done. We now need to pass through the RNN. 
// ********************************************************************************************************* // int alpha_param[1]; // alpha_param[0] = -0.05; // int beta_param[1]; // beta_param[0] = 1.0; float rval; cudaErrCheck(cudaDeviceSynchronize()); for(int i = 0; i < 300; i++){ cudaEvent_t start, stop; float timeForward, timeBackward1, timeBackward2; cudaErrCheck(cudaEventCreate(&start)); cudaErrCheck(cudaEventCreate(&stop)); cudaErrCheck(cudaEventRecord(start)); // If we're not training we use this instead // cudnnErrCheck(cudnnRNNForwardInference(cudnnHandle, // rnnDesc, // xDesc, // x, // hxDesc, // hx, // cxDesc, // cx, // wDesc, // w, // yDesc, // y, // hyDesc, // hy, // cyDesc, // cy, // workspace, // workSize)); cudnnErrCheck(cudnnRNNForwardTraining(cudnnHandle, rnnDesc, seqLength, xDesc, x, hxDesc, hx, cxDesc, cx, wDesc, w, yDesc, y, hyDesc, hy, cyDesc, cy, workspace, workSize, reserveSpace, reserveSize)); cudaErrCheck(cudaEventRecord(stop)); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventElapsedTime(&timeForward, start, stop)); cudaErrCheck(cudaEventRecord(start)); //Compute a dy cudaErrCheck(cudaMemcpy(y_test, y, ysize, cudaMemcpyDeviceToHost)); for(int j = 0; j < ysize; j++){ y_test[j] -= y_train[j]; } cudaErrCheck(cudaMemcpy(dy, y_test, ysize, cudaMemcpyHostToDevice)); //cudaErrCheck(cudaMemcpy(y, y_train, ysize, cudaMemcpyHostToDevice)); cudnnErrCheck(cudnnRNNBackwardData(cudnnHandle, rnnDesc, seqLength, yDesc, y, dyDesc, dy, dhyDesc, dhy, dcyDesc, dcy, wDesc, w, hxDesc, hx, cxDesc, cx, dxDesc, dx, dhxDesc, dhx, dcxDesc, dcx, workspace, workSize, reserveSpace, reserveSize )); cudaErrCheck(cudaEventRecord(stop)); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventElapsedTime(&timeBackward1, start, stop)); cudaErrCheck(cudaEventRecord(start)); // cudnnRNNBackwardWeights adds to the data in dw. 
cudaErrCheck(cudaMemset(dw, 0, weightsSize)); cudnnErrCheck(cudnnRNNBackwardWeights( cudnnHandle, rnnDesc, seqLength, xDesc, x, hxDesc, hx, yDesc, y, workspace, workSize, dwDesc, dw, reserveSpace, reserveSize )); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventRecord(stop)); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventElapsedTime(&timeBackward2, start, stop)); if (true) { float* testOutputdw; float testOutputw[weightsSize]; float* testY; testOutputdw = (float*)malloc(weightsSize); testY = (float*)malloc(ysize); cudaErrCheck(cudaMemcpy(testY, y, ysize, cudaMemcpyDeviceToHost)); cudaErrCheck(cudaMemcpy(testOutputdw, dw, weightsSize, cudaMemcpyDeviceToHost)); cudaErrCheck(cudaMemcpy(testOutputw, w, weightsSize, cudaMemcpyDeviceToHost)); free(testOutputdw); free(testY); } printf("Epoch %d \n",i); int numMats = 0; if (RNNMode == CUDNN_RNN_RELU || RNNMode == CUDNN_RNN_TANH) { numMats = 2; } else if (RNNMode == CUDNN_LSTM) { numMats = 8; } else if (RNNMode == CUDNN_GRU) { numMats = 6; } printf("Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward)); rval = numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward); } // Calculate FLOPS //sprintf(buffer, "Forward: %3.0f GFLOPS\n", numMats * 2ull * (bidirectional ? 2 : 1) * hiddenSize * hiddenSize * seqLength * miniBatch * numLayers / (1e6 * timeForward)); //cudaDeviceSynchronize(); // ********************************************************************************************************* // Print checksums. // ********************************************************************************************************* // Can these be passed back and saved? 
cudaFree(x); cudaFree(hx); cudaFree(cx); cudaFree(y); cudaFree(hy); cudaFree(cy); cudaFree(dx); cudaFree(dhx); cudaFree(dcx); cudaFree(dy); cudaFree(dhy); cudaFree(dcy); cudaFree(workspace); cudaFree(reserveSpace); cudaFree(w); cudaFree(dw); cudnnDestroy(cudnnHandle); return rval; } //extern "C" int cudamain() { return 0; }
f5b0a548a90722f5ddbd4c82a29285b368eb97d6.hip
// !!! This is a file automatically generated by hipify!!! #include "HostImporter.h" #include "seba-video/debug_def.h" namespace seba { HostImporter::HostImporter( sebaSurfaceFormat_t surfaceFmt, unsigned maxWidth, unsigned maxHeight) : Component(surfaceFmt, seba::Packed, maxWidth, maxHeight) { } HostImporter::~HostImporter() { } void HostImporter::Copy( void* h_src, unsigned width, unsigned pitch, unsigned height ) { auto &info = GetOutputSurface().GetInfo(); // I'm unsure what's fastvideo behavior otherwise unsigned byteWidth = width * SurfacePixelByteSize((sebaSurfaceFormat_t)info.surfaceFmt); INSIST(width <= info.maxWidth); INSIST(height <= info.maxHeight); INSIST(pitch <= info.maxPitch); INSIST(byteWidth <= pitch); info.width = width; info.height = height; // Don't handle the case when src and dst got diffent pitch for now info.pitch = pitch; hipMemcpy2DAsync( (void *)GetOutputSurface().GetDevicePtr(), pitch, h_src, info.pitch, byteWidth, height, hipMemcpyHostToDevice ); } }
f5b0a548a90722f5ddbd4c82a29285b368eb97d6.cu
#include "HostImporter.h" #include "seba-video/debug_def.h" namespace seba { HostImporter::HostImporter( sebaSurfaceFormat_t surfaceFmt, unsigned maxWidth, unsigned maxHeight) : Component(surfaceFmt, seba::Packed, maxWidth, maxHeight) { } HostImporter::~HostImporter() { } void HostImporter::Copy( void* h_src, unsigned width, unsigned pitch, unsigned height ) { auto &info = GetOutputSurface().GetInfo(); // I'm unsure what's fastvideo behavior otherwise unsigned byteWidth = width * SurfacePixelByteSize((sebaSurfaceFormat_t)info.surfaceFmt); INSIST(width <= info.maxWidth); INSIST(height <= info.maxHeight); INSIST(pitch <= info.maxPitch); INSIST(byteWidth <= pitch); info.width = width; info.height = height; // Don't handle the case when src and dst got diffent pitch for now info.pitch = pitch; cudaMemcpy2DAsync( (void *)GetOutputSurface().GetDevicePtr(), pitch, h_src, info.pitch, byteWidth, height, cudaMemcpyHostToDevice ); } }
5e0e4a16d06fccc794e265e53f33012afac33345.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "cublas_helpers.h" #include "rank_k_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor RankKOperationProfiler::RankKOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kRankK, { {ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"}, {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"}, {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"}, }, { library::Provider::kCUBLAS} ) { description_ = " Rank-k Update. 
D = alpha * A*A^T + beta * C (symmetric) or D = alpha * A*A^H + beta * C (hermitian)"; } /// Destructor RankKOperationProfiler::~RankKOperationProfiler() { } /// Prints usage statement for the math function void RankKOperationProfiler::print_usage(std::ostream &out) const { out << "RankK" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void RankKOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular problem size Syrk kernel:\n" << " $ cutlass_profiler --operation=rank_k --blas_mode=symmetric --n=1024 --k=128\n\n" << "Profile a particular problem size Herk kernel:\n" << " $ cutlass_profiler --operation=rank_k --blas_mode=hermitian --n=1024 --k=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=rank_k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=rank_k --accumulator-type=f16,f32\n\n" << "Schmoo over fill modees:\n" << " $ cutlass_profiler --operation=rank_k --fill_mode=lower/upper\n\n" << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t):\n" << " $ cutlass_profiler --operation=rank_k --A=f16:column or --A=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=rank_k --dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=rank_k --dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=rank_k --dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" << " $ cutlass_profiler --operation=rank_k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to rank_k kernels with a quick functional test and save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=rank_k \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif Status RankKOperationProfiler::RankKProblem::parse( library::RankKDescription const &operation_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->k, "k", problem_space, problem)) { // default value this->k = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, 
problem)) { // default value this->batch_count = 1; } if (this->split_k_slices > 1 && this->batch_count > 1) { // At least one of these must be one return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->n), int(this->k)}).front(); this->ldc = DeviceAllocation::get_packed_layout( operation_desc.C.layout, {int(this->n), int(this->n)}).front(); return Status::kSuccess; } /// Total number of bytes loaded int64_t RankKOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const { // Input bytes read and Output bytes written for the gemm problem int64_t bytes = int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k + int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k + // Half matrix including the diagonal will have (N*(N+1))/2 elements int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; } bytes *= batch_count; return bytes; } /// Total number of flops computed 
int64_t RankKOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const { // FLOPs = 2 * n(n+1)k/2 [mma] + 2 * n(n+1)/2 [epilogue] // FLOPs = n(n+1)(k + 1) int64_t flops_ = n * (n + 1) * (k + 1); // complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: flops_ *= 4; break; case library::MathOperationID::kMultiplyAddComplexFastF32: flops_ *= 4; break; case library::MathOperationID::kMultiplyAddGaussianComplex: flops_ *= 3; break; default: break; } return flops_; } /// Initializes a performance result void RankKOperationProfiler::RankKProblem::initialize_result( PerformanceResult &result, library::RankKDescription const &operation_desc, ProblemSpace const &problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "C", problem_space, std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode)); set_argument(result, "n", problem_space, n); set_argument(result, "k", problem_space, k); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } ///////////////////////////////////////////////////////////////////////////////////////////////// 
/// Extracts the problem dimensions Status RankKOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::RankKDescription const &operation_desc = static_cast<library::RankKDescription const &>(operation->description()); if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } rank_k_workspace_.configuration.problem_size.m() = int(problem_.n); rank_k_workspace_.configuration.problem_size.n() = int(problem_.n); rank_k_workspace_.configuration.problem_size.k() = int(problem_.k); rank_k_workspace_.configuration.lda = problem_.lda; rank_k_workspace_.configuration.ldc = problem_.ldc; rank_k_workspace_.configuration.ldd = problem_.ldc; //rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices); rank_k_workspace_.arguments.A = nullptr; rank_k_workspace_.arguments.C = nullptr; rank_k_workspace_.arguments.D = nullptr; rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments); } /// Initializes the performance result void RankKOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::RankKDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = 
Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); result.bytes = problem_.bytes(operation_desc); result.flops = problem_.flops(operation_desc); result.runtime = 0; // complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: result.flops *= 4; break; case library::MathOperationID::kMultiplyAddComplexFastF32: result.flops *= 4; break; default: break; } } /// Initializes workspace Status RankKOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::RankKDescription const &operation_desc = static_cast<library::RankKDescription const &>(operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { int seed_shift = 0; rank_k_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.n), int(problem_.k)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); rank_k_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)}, 1, // batch_count seed_shift++ ); rank_k_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)} ); rank_k_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)} ); rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data()); 
rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data()); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration); rank_k_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration); rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &rank_k_workspace_.configuration, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kRankK; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool RankKOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); 
rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &rank_k_workspace_.arguments, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } hipError_t result = hipDeviceSynchronize(); if (result != hipSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUBLAS if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { // Guard against unsupported cases auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description()); if (cublas_satisfies(rank_k_desc) == Status::kSuccess) { // call cublas verification if supported verify_with_cublas_( options, report, device_context, operation, problem_space, problem); } else { // set verification map for cublas to not supported results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUBLAS // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling 
return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool RankKOperationProfiler::verify_with_cublas_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { #if CUTLASS_ENABLE_CUBLAS library::RankKDescription const &rank_k_desc = static_cast<library::RankKDescription const &>(operation->description()); // // Construct cuBLAS operators // CublasCreate handle; hipblasStatus_t status = handle.get_cublas_create_status(); if (status != HIPBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Initialize state // try { // // Construct dispatcher to cublas<t>Syrk() // // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; detail::cublasRankKDispatcher rank_k_op( rank_k_desc, rank_k_workspace_.configuration, rank_k_workspace_.arguments ); if (rank_k_op.status != Status::kSuccess) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; return true; } results_.back().status = Status::kSuccess; status = rank_k_op(handle); // Handle errors if (status != HIPBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( options, *rank_k_workspace_.Computed, *rank_k_workspace_.Reference ); // Save 
workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { save_workspace( device_context, options, rank_k_desc, library::Provider::kCUTLASS, library::Provider::kCUBLAS); } } catch (...) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; } #endif // Return true means continue profiling return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool RankKOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &rank_k_workspace_.arguments, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data() ); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
5e0e4a16d06fccc794e265e53f33012afac33345.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* **************************************************************************************************/ /* \file \brief Execution environment */ #include <iostream> #include <stdexcept> #include <iomanip> #include <ios> #include "cutlass/core_io.h" #include "cublas_helpers.h" #include "rank_k_operation_profiler.h" #include "gpu_timer.h" ///////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { ///////////////////////////////////////////////////////////////////////////////////////////////// /// Ctor RankKOperationProfiler::RankKOperationProfiler(Options const &options): OperationProfiler( options, library::OperationKind::kRankK, { {ArgumentTypeID::kEnumerated, {"rank_k_kind"}, "Variant of RankK (universal)"}, {ArgumentTypeID::kInteger, {"n", "problem-size::n"}, "N dimension of the RankK problem space"}, {ArgumentTypeID::kInteger, {"k", "problem-size::k"}, "K dimension of the RankK problem space"}, {ArgumentTypeID::kTensor, {"A"}, "Tensor storing the A operand"}, {ArgumentTypeID::kTensor, {"C"}, "Tensor storing the C operand"}, {ArgumentTypeID::kEnumerated, {"fill_mode"}, "Fill Mode for RankK kernel (lower or upper)"}, {ArgumentTypeID::kEnumerated, {"blas_mode"}, "Blas Mode for RankK kernel (symmetric or hermitian)"}, {ArgumentTypeID::kScalar, {"alpha", "epilogue::alpha"}, "Epilogue scalar alpha"}, {ArgumentTypeID::kScalar, {"beta", "epilogue::beta"}, "Epilogue scalar beta"}, {ArgumentTypeID::kInteger, {"split_k_slices", "split-k-slices"}, "Number of partitions of K dimension"}, {ArgumentTypeID::kInteger, {"batch_count", "batch-count"}, "Number of RankK computed in one batch"}, }, { library::Provider::kCUBLAS} ) { description_ = " Rank-k Update. 
D = alpha * A*A^T + beta * C (symmetric) or D = alpha * A*A^H + beta * C (hermitian)"; } /// Destructor RankKOperationProfiler::~RankKOperationProfiler() { } /// Prints usage statement for the math function void RankKOperationProfiler::print_usage(std::ostream &out) const { out << "RankK" << "\n\n"; OperationProfiler::print_usage(out); } /// Prints examples void RankKOperationProfiler::print_examples(std::ostream &out) const { out << "\nExamples:\n\n" << "Profile a particular problem size Syrk kernel:\n" << " $ cutlass_profiler --operation=rank_k --blas_mode=symmetric --n=1024 --k=128\n\n" << "Profile a particular problem size Herk kernel:\n" << " $ cutlass_profiler --operation=rank_k --blas_mode=hermitian --n=1024 --k=128\n\n" << "Schmoo over problem size and beta:\n" << " $ cutlass_profiler --operation=rank_k --n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n" << "Schmoo over accumulator types:\n" << " $ cutlass_profiler --operation=rank_k --accumulator-type=f16,f32\n\n" << "Schmoo over fill modees:\n" << " $ cutlass_profiler --operation=rank_k --fill_mode=lower/upper\n\n" << "Run when A is f16 with column-major or A is any datatype with row-major (For column major, use column, col, or n. 
For row major use, row or t):\n" << " $ cutlass_profiler --operation=rank_k --A=f16:column or --A=*:row\n\n" << "Using various input value distribution:\n" << " $ cutlass_profiler --operation=rank_k --dist=uniform,min:0,max:3\n" << " $ cutlass_profiler --operation=rank_k --dist=gaussian,mean:0,stddev:3\n" << " $ cutlass_profiler --operation=rank_k --dist=sequential,start:0,delta:1\n\n" << "Run a kernel with cta tile size of 256x128x32 and save workspace if results are incorrect (note that --cta-tile::k=32 is default cta-tile size):\n" << " $ cutlass_profiler --operation=rank_k --cta_m=256 --cta_n=128 --cta_k=32 --save-workspace=incorrect\n\n" << "Test your changes to rank_k kernels with a quick functional test and save results in functional-test.csv:\n" << " $ cutlass_profiler --operation=rank_k \\ \n" << " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n" << " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n" << " --beta=0,1,2 --profiling-iterations=1 \\ \n" << " --providers=cutlass --output=functional-test.csv\n\n"; } ///////////////////////////////////////////////////////////////////////////////////////////////// #if 0 // used this for debugging static std::string byte_string(std::vector<uint8_t> const &bytes) { std::stringstream ss; ss << "0x"; for (size_t idx = bytes.size(); idx > 0; --idx) { ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1)); } return ss.str(); } #endif Status RankKOperationProfiler::RankKProblem::parse( library::RankKDescription const &operation_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!arg_as_int(this->n, "n", problem_space, problem)) { // default value this->n = 1024; } if (!arg_as_int(this->k, "k", problem_space, problem)) { // default value this->k = 1024; } if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space, problem)) { // default value this->split_k_slices = 1; } if (!arg_as_int(this->batch_count, "batch_count", problem_space, 
problem)) { // default value this->batch_count = 1; } if (this->split_k_slices > 1 && this->batch_count > 1) { // At least one of these must be one return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.A, "A", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!tensor_description_satisfies(operation_desc.C, "C", problem_space, problem)) { return Status::kErrorInvalidProblem; } if (!arg_as_scalar( this->alpha, operation_desc.element_epilogue, "alpha", problem_space, problem)) { if (!cast_from_double(this->alpha, operation_desc.element_epilogue, 1)) { return Status::kErrorInternal; } } if (!arg_as_scalar( this->beta, operation_desc.element_epilogue, "beta", problem_space, problem)) { if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) { return Status::kErrorInternal; } } this->lda = DeviceAllocation::get_packed_layout( operation_desc.A.layout, {int(this->n), int(this->k)}).front(); this->ldc = DeviceAllocation::get_packed_layout( operation_desc.C.layout, {int(this->n), int(this->n)}).front(); return Status::kSuccess; } /// Total number of bytes loaded int64_t RankKOperationProfiler::RankKProblem::bytes(library::RankKDescription const &operation_desc) const { // Input bytes read and Output bytes written for the gemm problem int64_t bytes = int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k + int64_t(library::sizeof_bits(operation_desc.A.element) * n / 8) * k + // Half matrix including the diagonal will have (N*(N+1))/2 elements int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; // Set is_beta_zero true if beta is zero bool is_beta_zero = std::all_of(beta.begin(), beta.end(), [](uint8_t i) { return i==0; }); // Output bytes read for the gemm problem for non-zero beta values if (!is_beta_zero) { bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * n / 8) * (n+1) / 2; } bytes *= batch_count; return bytes; } /// Total number of flops computed 
int64_t RankKOperationProfiler::RankKProblem::flops(library::RankKDescription const &operation_desc) const { // FLOPs = 2 * n(n+1)k/2 [mma] + 2 * n(n+1)/2 [epilogue] // FLOPs = n(n+1)(k + 1) int64_t flops_ = n * (n + 1) * (k + 1); // complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: flops_ *= 4; break; case library::MathOperationID::kMultiplyAddComplexFastF32: flops_ *= 4; break; case library::MathOperationID::kMultiplyAddGaussianComplex: flops_ *= 3; break; default: break; } return flops_; } /// Initializes a performance result void RankKOperationProfiler::RankKProblem::initialize_result( PerformanceResult &result, library::RankKDescription const &operation_desc, ProblemSpace const &problem_space) { result.arguments.resize(problem_space.rank()); set_argument(result, "rank_k_kind", problem_space, library::to_string(operation_desc.rank_k_kind)); set_argument(result, "A", problem_space, std::string(library::to_string(operation_desc.A.element)) + ":" + library::to_string(operation_desc.A.layout)); set_argument(result, "C", problem_space, std::string(library::to_string(operation_desc.C.element)) + ":" + library::to_string(operation_desc.C.layout)); set_argument(result, "fill_mode", problem_space, library::to_string(operation_desc.fill_mode)); set_argument(result, "blas_mode", problem_space, library::to_string(operation_desc.blas_mode)); set_argument(result, "n", problem_space, n); set_argument(result, "k", problem_space, k); set_argument(result, "split_k_slices", problem_space, split_k_slices); set_argument(result, "batch_count", problem_space, batch_count); set_argument(result, "alpha", problem_space, library::lexical_cast(alpha, operation_desc.element_epilogue)); set_argument(result, "beta", problem_space, library::lexical_cast(beta, operation_desc.element_epilogue)); } ///////////////////////////////////////////////////////////////////////////////////////////////// 
/// Extracts the problem dimensions Status RankKOperationProfiler::initialize_configuration( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::RankKDescription const &operation_desc = static_cast<library::RankKDescription const &>(operation->description()); if (operation_desc.rank_k_kind != library::RankKKind::kUniversal) { return Status::kErrorInvalidProblem; } Status status = problem_.parse(operation_desc, problem_space, problem); if (status != Status::kSuccess) { return status; } rank_k_workspace_.configuration.problem_size.m() = int(problem_.n); rank_k_workspace_.configuration.problem_size.n() = int(problem_.n); rank_k_workspace_.configuration.problem_size.k() = int(problem_.k); rank_k_workspace_.configuration.lda = problem_.lda; rank_k_workspace_.configuration.ldc = problem_.ldc; rank_k_workspace_.configuration.ldd = problem_.ldc; //rank_k_workspace_.configuration.split_k_slices = int(problem_.split_k_slices); rank_k_workspace_.configuration.batch_count = int(problem_.split_k_slices); rank_k_workspace_.arguments.A = nullptr; rank_k_workspace_.arguments.C = nullptr; rank_k_workspace_.arguments.D = nullptr; rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; initialize_result_(this->model_result_, options, operation_desc, problem_space); return operation->can_implement(&rank_k_workspace_.configuration, &rank_k_workspace_.arguments); } /// Initializes the performance result void RankKOperationProfiler::initialize_result_( PerformanceResult &result, Options const &options, library::RankKDescription const &operation_desc, ProblemSpace const &problem_space) { result.provider = library::Provider::kCUTLASS; result.disposition = Disposition::kNotRun; result.status = 
Status::kSuccess; result.operation_name = operation_desc.name; problem_.initialize_result(result, operation_desc, problem_space); OperationProfiler::initialize_result_(result, operation_desc, problem_space); result.bytes = problem_.bytes(operation_desc); result.flops = problem_.flops(operation_desc); result.runtime = 0; // complex-valued support switch (operation_desc.tile_description.math_instruction.math_operation) { case library::MathOperationID::kMultiplyAddComplex: result.flops *= 4; break; case library::MathOperationID::kMultiplyAddComplexFastF32: result.flops *= 4; break; default: break; } } /// Initializes workspace Status RankKOperationProfiler::initialize_workspace( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::RankKDescription const &operation_desc = static_cast<library::RankKDescription const &>(operation->description()); if (options.execution_mode != ExecutionMode::kDryRun) { int seed_shift = 0; rank_k_workspace_.A = device_context.allocate_tensor( options, "A", operation_desc.A.element, operation_desc.A.layout, {int(problem_.n), int(problem_.k)}, {int(problem_.lda)}, 1, // batch_count seed_shift++ ); rank_k_workspace_.C = device_context.allocate_tensor( options, "C", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)}, 1, // batch_count seed_shift++ ); rank_k_workspace_.Computed = device_context.allocate_tensor( "D", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)} ); rank_k_workspace_.Reference = device_context.allocate_tensor( "Reference", operation_desc.C.element, operation_desc.C.layout, {int(problem_.n), int(problem_.n)}, {int(problem_.ldc)} ); rank_k_workspace_.Computed->copy_from_device(rank_k_workspace_.C->data()); 
rank_k_workspace_.Reference->copy_from_device(rank_k_workspace_.C->data()); } // // Initialize the CUTLASS operation // Status status = Status::kSuccess; if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { if (options.execution_mode != ExecutionMode::kDryRun) { uint64_t workspace_size = operation->get_host_workspace_size(&rank_k_workspace_.configuration); rank_k_workspace_.host_workspace.resize(workspace_size, 0); workspace_size = operation->get_device_workspace_size(&rank_k_workspace_.configuration); rank_k_workspace_.device_workspace.reset(library::NumericTypeID::kU8, workspace_size); status = operation->initialize( &rank_k_workspace_.configuration, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data()); } // // If CUTLASS is enabled, generate a result for it // results_.push_back(model_result_); results_.back().provider = library::Provider::kCUTLASS; results_.back().op_kind = library::OperationKind::kRankK; results_.back().disposition = Disposition::kNotRun; for(auto provider : verification_providers_) { results_.back().verification_map[provider] = Disposition::kNotRun; } } return status; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool RankKOperationProfiler::verify_cutlass( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) { return true; } if (options.execution_mode == ExecutionMode::kDryRun) { return true; } // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); 
rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; // // Run the CUTLASS operation // results_.back().status = operation->run( &rank_k_workspace_.arguments, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data()); if (results_.back().status != Status::kSuccess) { results_.back().disposition = Disposition::kFailed; return false; } cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { results_.back().disposition = Disposition::kFailed; return false; } // CUTLASS op ran the but not yet verified against any verification provider results_.back().disposition = Disposition::kNotVerified; // // Run verification providers // if (options.verification.enabled) { #if CUTLASS_ENABLE_CUBLAS if (options.verification.provider_enabled(library::Provider::kCUBLAS)) { // Guard against unsupported cases auto const & rank_k_desc = static_cast<library::RankKDescription const &>(operation->description()); if (cublas_satisfies(rank_k_desc) == Status::kSuccess) { // call cublas verification if supported verify_with_cublas_( options, report, device_context, operation, problem_space, problem); } else { // set verification map for cublas to not supported results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotSupported; } } #endif // #if CUTLASS_ENABLE_CUBLAS // Update disposition to worst case verification outcome among all // verification providers which are supported bool is_any_verification_run_passed = false; for(auto &m : results_.back().verification_map) { if(m.second == Disposition::kFailed || m.second == Disposition::kIncorrect) { results_.back().disposition = m.second; return true; } if(!is_any_verification_run_passed && m.second == Disposition::kPassed) { is_any_verification_run_passed = true; } } if(is_any_verification_run_passed) { results_.back().disposition = Disposition::kPassed; } } // Return true means continue profiling 
return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Verifies CUTLASS against references bool RankKOperationProfiler::verify_with_cublas_( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { #if CUTLASS_ENABLE_CUBLAS library::RankKDescription const &rank_k_desc = static_cast<library::RankKDescription const &>(operation->description()); // // Construct cuBLAS operators // CublasCreate handle; cublasStatus_t status = handle.get_cublas_create_status(); if (status != CUBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Initialize state // try { // // Construct dispatcher to cublas<t>Syrk() // // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.Reference->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Reference->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; detail::cublasRankKDispatcher rank_k_op( rank_k_desc, rank_k_workspace_.configuration, rank_k_workspace_.arguments ); if (rank_k_op.status != Status::kSuccess) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kNotRun; return true; } results_.back().status = Status::kSuccess; status = rank_k_op(handle); // Handle errors if (status != CUBLAS_STATUS_SUCCESS) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; return true; } // // Verify results // results_.back().verification_map[library::Provider::kCUBLAS] = compare_tensors( options, *rank_k_workspace_.Computed, *rank_k_workspace_.Reference ); // Save 
workspace if incorrect if (options.verification.save_workspace == SaveWorkspace::kIncorrect && results_.back().verification_map[library::Provider::kCUBLAS] == Disposition::kIncorrect) { save_workspace( device_context, options, rank_k_desc, library::Provider::kCUTLASS, library::Provider::kCUBLAS); } } catch (...) { results_.back().verification_map[library::Provider::kCUBLAS] = Disposition::kFailed; } #endif // Return true means continue profiling return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// /// Measures performance results bool RankKOperationProfiler::profile( Options const &options, PerformanceReport &report, DeviceContext &device_context, library::Operation const *operation, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) { // Initialize structure containing RankK arguments rank_k_workspace_.arguments.A = rank_k_workspace_.A->data(); rank_k_workspace_.arguments.C = rank_k_workspace_.C->data(); rank_k_workspace_.arguments.D = rank_k_workspace_.Computed->data(); rank_k_workspace_.arguments.alpha = problem_.alpha.data(); rank_k_workspace_.arguments.beta = problem_.beta.data(); rank_k_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost; results_.back().status = profile_cutlass_( results_.back().runtime, options, operation, &rank_k_workspace_.arguments, rank_k_workspace_.host_workspace.data(), rank_k_workspace_.device_workspace.data() ); } return true; } ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////
e915a7365da646adce860b03844dd1b2bbca3b20.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <thrust/remove.h> #include <thrust/scan.h> #include <thrust/transform_scan.h> #include "nvstrings/NVStrings.h" #include "nvstrings/NVText.h" #include <cudf/utilities/error.hpp> #include "../custring_view.cuh" #include "../util.h" // NVStrings* NVText::create_ngrams(NVStrings& strs, unsigned int ngrams, const char* separator) { if (ngrams == 0) ngrams = 2; if (separator == nullptr) separator = ""; unsigned int count = strs.size(); if (count == 0) return strs.copy(); auto execpol = rmm::exec_policy(0); rmm::device_vector<custring_view*> strings(count, nullptr); custring_view** d_strings = strings.data().get(); strs.create_custring_index(d_strings); // first let's remove any nulls or empty strings auto end = thrust::remove_if( execpol->on(0), d_strings, d_strings + count, [] __device__(custring_view * ds) { return (ds == nullptr) || ds->empty(); }); count = (unsigned int)(end - d_strings); // new count if (count <= ngrams) return strs.join(separator, ""); // this not quite right if there are nulls we removed if (ngrams == 1) return strs.copy(); // same with this one; need method to create NVStrings from custring_views custring_view* d_separator = 
custring_from_host(separator); // compute size of new strings unsigned int ngrams_count = count - ngrams + 1; rmm::device_vector<size_t> sizes(ngrams_count, 0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), ngrams_count, [d_strings, ngrams, d_separator, d_sizes] __device__(unsigned int idx) { size_t size = 0; for (unsigned int n = 0; n < ngrams; ++n) { custring_view* dstr = d_strings[n + idx]; size += dstr->size(); if ((n + 1) < ngrams) size += d_separator->size(); } d_sizes[idx] = size; }); size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes + ngrams_count); rmm::device_vector<char> buffer(bufsize); char* d_buffer = buffer.data().get(); rmm::device_vector<size_t> offsets(ngrams_count, 0); thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin()); size_t* d_offsets = offsets.data().get(); // build the memory and a list of pointers rmm::device_vector<thrust::pair<const char*, size_t>> results(ngrams_count); thrust::pair<const char*, size_t>* d_results = results.data().get(); thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), ngrams_count, [d_strings, d_separator, ngrams, d_offsets, d_buffer, d_results] __device__(unsigned int idx) { char* buffer = d_buffer + d_offsets[idx]; char* sptr = buffer; size_t length = 0; for (unsigned int n = 0; n < ngrams; ++n) { custring_view* dstr = d_strings[n + idx]; unsigned int bytes = dstr->size(); length += bytes; memcpy(sptr, dstr->data(), bytes); sptr += bytes; if ((n + 1) >= ngrams) continue; bytes = d_separator->size(); length += bytes; memcpy(sptr, d_separator->data(), bytes); sptr += bytes; } d_results[idx].first = buffer; d_results[idx].second = length; }); // RMM_FREE(d_separator, 0); // build strings object from results elements return NVStrings::create_from_index((std::pair<const char*, size_t>*)d_results, ngrams_count); } // using position_pair = thrust::pair<int32_t, 
int32_t>; using index_pair = thrust::pair<const char*, size_t>; // This class walks a string looking for specified delimiter character(s). // It will automatically ignore adjacent delimiters (i.e. different than split). // The next_token method returns character start position (spos) and end // position (epos) between delimiter runs identifying each token. // An iterator is used to retrieve each utf8 character to be checked. // The spaces parameter identifies a run of delimiters (or not delimiters). struct base_string_tokenize { custring_view_array d_strings; custring_view* d_delimiter; custring_view* d_separator; __device__ bool is_delimiter(Char ch) { if (!d_delimiter) return (ch <= ' '); // all ascii whitespace return d_delimiter->find(ch) >= 0; } __device__ bool next_token( custring_view* dstr, bool& spaces, custring_view::iterator& itr, int& spos, int& epos) { if (spos >= dstr->chars_count()) return false; for (; itr != dstr->end(); ++itr) { Char ch = *itr; if (spaces == is_delimiter(ch)) { if (spaces) spos = itr.position() + 1; else epos = itr.position() + 1; continue; } spaces = !spaces; if (spaces) { epos = itr.position(); break; } } return spos < epos; } }; // Count the number of tokens within a string. struct string_token_counter_fn : base_string_tokenize { string_token_counter_fn(custring_view_array d_strings, custring_view* d_delimiter, custring_view* d_separator) : base_string_tokenize{d_strings, d_delimiter, d_separator} { } __device__ int32_t operator()(custring_view* dstr) { bool spaces = true; int nchars = dstr->chars_count(); int spos = 0, epos = nchars; int32_t token_count = 0; auto itr = dstr->begin(); while (next_token(dstr, spaces, itr, spos, epos)) { ++token_count; spos = epos + 1; epos = nchars; ++itr; } return token_count; } }; // Record the byte positions of each token within each string. 
struct string_tokens_positions_fn : base_string_tokenize { const int32_t* d_token_offsets; position_pair* d_token_positions; string_tokens_positions_fn(custring_view_array d_strings, custring_view* d_delimiter, custring_view* d_separator, const int32_t* d_token_offsets, position_pair* d_token_positions) : base_string_tokenize{d_strings, d_delimiter, d_separator}, d_token_offsets(d_token_offsets), d_token_positions(d_token_positions) { } __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; bool spaces = true; int nchars = dstr->chars_count(); int spos = 0, epos = nchars, token_index = 0; auto token_positions = d_token_positions + d_token_offsets[idx]; auto itr = dstr->begin(); while (next_token(dstr, spaces, itr, spos, epos)) { token_positions[token_index++] = thrust::make_pair(dstr->byte_offset_for(spos), // convert char pos dstr->byte_offset_for(epos)); // to byte offset spos = epos + 1; epos = nchars; ++itr; } } }; // Compute the size of each ngram that will be created for each string. // Adjacent token position-pairs are used to calculate the total ngram sizes. struct ngram_sizes_fn { custring_view* d_separator; int32_t ngrams; // always >=2 const int32_t* d_token_offsets; const position_pair* d_token_positions; const int32_t* d_ngram_offsets; __device__ int32_t operator()(unsigned int idx) { auto token_positions = d_token_positions + d_token_offsets[idx]; auto token_count = d_token_offsets[idx + 1] - d_token_offsets[idx]; int32_t bytes = 0; for (int token_index = (ngrams - 1); token_index < token_count; ++token_index) { int32_t length = 0; for (int n = (ngrams - 1); n >= 0; --n) // sliding window of tokens { position_pair item = token_positions[token_index - n]; length += item.second - item.first; // size of this token in bytes length += d_separator->size(); // add size of the separator } length -= d_separator->size(); // remove trailing separator bytes += length; } return bytes; } }; // Build the ngrams for each string. 
// The ngrams for each string are placed contiguously within the section of memory // assigned for the string. And an index_pair is recorded for each ngram. struct ngram_builder_fn { custring_view_array d_strings; custring_view* d_separator; int32_t ngrams; const int32_t* d_token_offsets; const position_pair* d_token_positions; const int32_t* d_ngram_offsets; const int32_t* d_chars_offsets; char* d_chars; // write ngram strings to here index_pair* d_indices; // output ngram index-pairs here __device__ void operator()(int32_t idx) { custring_view* dstr = d_strings[idx]; auto token_positions = d_token_positions + d_token_offsets[idx]; auto token_count = d_token_offsets[idx + 1] - d_token_offsets[idx]; int ngram_index = 0; char* out_ptr = d_chars + d_chars_offsets[idx]; auto indices = d_indices + d_ngram_offsets[idx]; for (int token_index = (ngrams - 1); token_index < token_count; ++token_index) { int32_t length = 0; auto ngram_ptr = out_ptr; for (int n = (ngrams - 1); n >= 0; --n) { position_pair item = token_positions[token_index - n]; out_ptr = copy_and_incr(out_ptr, dstr->data() + item.first, item.second - item.first); length += item.second - item.first; if (n > 0) { // copy separator (except for the last one) out_ptr = copy_and_incr(out_ptr, d_separator->data(), d_separator->size()); length += d_separator->size(); } } indices[ngram_index++] = index_pair{ngram_ptr, length}; } } }; // This will create ngrams for each string and not across strings. 
NVStrings* NVText::ngrams_tokenize(NVStrings const& strs, const char* delimiter, int32_t ngrams, const char* separator) { auto count = strs.size(); if (count == 0) return strs.copy(); if (ngrams == 1) return NVText::tokenize(strs, delimiter); if (ngrams == 0) // default is 2 ngrams = 2; if (!separator) // no separator specified separator = ""; auto execpol = rmm::exec_policy(0); rmm::device_vector<custring_view*> strings(count, nullptr); custring_view** d_strings = strings.data().get(); strs.create_custring_index(d_strings); // first remove any nulls or empty strings auto end = thrust::remove_if( execpol->on(0), d_strings, d_strings + count, [] __device__(custring_view * dstr) { return (dstr == nullptr) || dstr->empty(); }); count = (unsigned int)(end - d_strings); // new count custring_view* d_separator = custring_from_host(separator); custring_view* d_delimiter = custring_from_host(delimiter); // Example for comments with ngrams=2 // ["a bb ccc","dd e"] => ["a_bb", "bb_ccc", "dd_e"] // get the number of tokens per string // token-counts = [3,2]; token-offsets = [0,3,5] rmm::device_vector<int32_t> token_offsets(count + 1); auto d_token_offsets = token_offsets.data().get(); thrust::transform_inclusive_scan(execpol->on(0), strings.begin(), strings.end(), d_token_offsets + 1, string_token_counter_fn{d_strings, d_delimiter, d_separator}, thrust::plus<int32_t>()); CUDA_TRY(hipMemset(d_token_offsets, 0, sizeof(int32_t))); int32_t total_tokens = token_offsets[count]; // 5 // get the token positions per string // => [(0,1),(2,4),(5,8), (0,2),(3,4)] rmm::device_vector<position_pair> token_positions(total_tokens); auto d_token_positions = token_positions.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int32_t>(0), count, string_tokens_positions_fn{ d_strings, d_delimiter, d_separator, d_token_offsets, d_token_positions}); // compute the number of ngrams per string // ngram-counts = [2,1]; ngram-offsets = [0,2,3] rmm::device_vector<int32_t> 
ngram_offsets(count + 1); auto d_ngram_offsets = ngram_offsets.data().get(); thrust::transform_inclusive_scan( execpol->on(0), thrust::make_counting_iterator<int32_t>(0), thrust::make_counting_iterator<int32_t>(count), d_ngram_offsets + 1, [d_token_offsets, ngrams] __device__(int32_t idx) { auto token_count = d_token_offsets[idx + 1] - d_token_offsets[idx]; return (token_count >= ngrams) ? token_count - ngrams + 1 : 0; }, thrust::plus<int32_t>()); CUDA_TRY(hipMemset(d_ngram_offsets, 0, sizeof(int32_t))); int32_t total_ngrams = ngram_offsets[count]; // 3 // compute the size of the ngrams for each string // sizes = [10,4]; offsets = [0,10,14] // 2 bigrams in 1st string total to 10 bytes; 1 bigram in 2nd string is 4 bytes rmm::device_vector<int32_t> chars_offsets(count + 1); auto d_chars_offsets = chars_offsets.data().get(); thrust::transform_inclusive_scan(execpol->on(0), thrust::make_counting_iterator<int32_t>(0), thrust::make_counting_iterator<int32_t>(count), d_chars_offsets + 1, ngram_sizes_fn{d_separator, ngrams, d_token_offsets, d_token_positions, d_ngram_offsets}, //, d_ngram_sizes}, thrust::plus<int32_t>()); CUDA_TRY(hipMemset(d_chars_offsets, 0, sizeof(int32_t))); // create output buffer size auto output_chars_size = chars_offsets[count]; // 14 rmm::device_vector<char> output_buffer(output_chars_size); auto d_output_buffer = output_buffer.data().get(); // build the ngrams in the output buffer rmm::device_vector<index_pair> results(total_ngrams); auto d_results = results.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int32_t>(0), count, ngram_builder_fn{d_strings, d_separator, ngrams, d_token_offsets, d_token_positions, d_ngram_offsets, d_chars_offsets, d_output_buffer, d_results}); RMM_FREE(d_separator, 0); RMM_FREE(d_delimiter, 0); // build strings object from results indices return NVStrings::create_from_index((std::pair<const char*, size_t>*)d_results, total_ngrams); }
e915a7365da646adce860b03844dd1b2bbca3b20.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_runtime.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <thrust/remove.h> #include <thrust/scan.h> #include <thrust/transform_scan.h> #include "nvstrings/NVStrings.h" #include "nvstrings/NVText.h" #include <cudf/utilities/error.hpp> #include "../custring_view.cuh" #include "../util.h" // NVStrings* NVText::create_ngrams(NVStrings& strs, unsigned int ngrams, const char* separator) { if (ngrams == 0) ngrams = 2; if (separator == nullptr) separator = ""; unsigned int count = strs.size(); if (count == 0) return strs.copy(); auto execpol = rmm::exec_policy(0); rmm::device_vector<custring_view*> strings(count, nullptr); custring_view** d_strings = strings.data().get(); strs.create_custring_index(d_strings); // first let's remove any nulls or empty strings auto end = thrust::remove_if( execpol->on(0), d_strings, d_strings + count, [] __device__(custring_view * ds) { return (ds == nullptr) || ds->empty(); }); count = (unsigned int)(end - d_strings); // new count if (count <= ngrams) return strs.join(separator, ""); // this not quite right if there are nulls we removed if (ngrams == 1) return strs.copy(); // same with this one; need method to create NVStrings from custring_views custring_view* d_separator = custring_from_host(separator); // compute size of new strings 
unsigned int ngrams_count = count - ngrams + 1; rmm::device_vector<size_t> sizes(ngrams_count, 0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), ngrams_count, [d_strings, ngrams, d_separator, d_sizes] __device__(unsigned int idx) { size_t size = 0; for (unsigned int n = 0; n < ngrams; ++n) { custring_view* dstr = d_strings[n + idx]; size += dstr->size(); if ((n + 1) < ngrams) size += d_separator->size(); } d_sizes[idx] = size; }); size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes + ngrams_count); rmm::device_vector<char> buffer(bufsize); char* d_buffer = buffer.data().get(); rmm::device_vector<size_t> offsets(ngrams_count, 0); thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin()); size_t* d_offsets = offsets.data().get(); // build the memory and a list of pointers rmm::device_vector<thrust::pair<const char*, size_t>> results(ngrams_count); thrust::pair<const char*, size_t>* d_results = results.data().get(); thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), ngrams_count, [d_strings, d_separator, ngrams, d_offsets, d_buffer, d_results] __device__(unsigned int idx) { char* buffer = d_buffer + d_offsets[idx]; char* sptr = buffer; size_t length = 0; for (unsigned int n = 0; n < ngrams; ++n) { custring_view* dstr = d_strings[n + idx]; unsigned int bytes = dstr->size(); length += bytes; memcpy(sptr, dstr->data(), bytes); sptr += bytes; if ((n + 1) >= ngrams) continue; bytes = d_separator->size(); length += bytes; memcpy(sptr, d_separator->data(), bytes); sptr += bytes; } d_results[idx].first = buffer; d_results[idx].second = length; }); // RMM_FREE(d_separator, 0); // build strings object from results elements return NVStrings::create_from_index((std::pair<const char*, size_t>*)d_results, ngrams_count); } // using position_pair = thrust::pair<int32_t, int32_t>; using index_pair = thrust::pair<const char*, size_t>; // 
This class walks a string looking for specified delimiter character(s). // It will automatically ignore adjacent delimiters (i.e. different than split). // The next_token method returns character start position (spos) and end // position (epos) between delimiter runs identifying each token. // An iterator is used to retrieve each utf8 character to be checked. // The spaces parameter identifies a run of delimiters (or not delimiters). struct base_string_tokenize { custring_view_array d_strings; custring_view* d_delimiter; custring_view* d_separator; __device__ bool is_delimiter(Char ch) { if (!d_delimiter) return (ch <= ' '); // all ascii whitespace return d_delimiter->find(ch) >= 0; } __device__ bool next_token( custring_view* dstr, bool& spaces, custring_view::iterator& itr, int& spos, int& epos) { if (spos >= dstr->chars_count()) return false; for (; itr != dstr->end(); ++itr) { Char ch = *itr; if (spaces == is_delimiter(ch)) { if (spaces) spos = itr.position() + 1; else epos = itr.position() + 1; continue; } spaces = !spaces; if (spaces) { epos = itr.position(); break; } } return spos < epos; } }; // Count the number of tokens within a string. struct string_token_counter_fn : base_string_tokenize { string_token_counter_fn(custring_view_array d_strings, custring_view* d_delimiter, custring_view* d_separator) : base_string_tokenize{d_strings, d_delimiter, d_separator} { } __device__ int32_t operator()(custring_view* dstr) { bool spaces = true; int nchars = dstr->chars_count(); int spos = 0, epos = nchars; int32_t token_count = 0; auto itr = dstr->begin(); while (next_token(dstr, spaces, itr, spos, epos)) { ++token_count; spos = epos + 1; epos = nchars; ++itr; } return token_count; } }; // Record the byte positions of each token within each string. 
struct string_tokens_positions_fn : base_string_tokenize { const int32_t* d_token_offsets; position_pair* d_token_positions; string_tokens_positions_fn(custring_view_array d_strings, custring_view* d_delimiter, custring_view* d_separator, const int32_t* d_token_offsets, position_pair* d_token_positions) : base_string_tokenize{d_strings, d_delimiter, d_separator}, d_token_offsets(d_token_offsets), d_token_positions(d_token_positions) { } __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; bool spaces = true; int nchars = dstr->chars_count(); int spos = 0, epos = nchars, token_index = 0; auto token_positions = d_token_positions + d_token_offsets[idx]; auto itr = dstr->begin(); while (next_token(dstr, spaces, itr, spos, epos)) { token_positions[token_index++] = thrust::make_pair(dstr->byte_offset_for(spos), // convert char pos dstr->byte_offset_for(epos)); // to byte offset spos = epos + 1; epos = nchars; ++itr; } } }; // Compute the size of each ngram that will be created for each string. // Adjacent token position-pairs are used to calculate the total ngram sizes. struct ngram_sizes_fn { custring_view* d_separator; int32_t ngrams; // always >=2 const int32_t* d_token_offsets; const position_pair* d_token_positions; const int32_t* d_ngram_offsets; __device__ int32_t operator()(unsigned int idx) { auto token_positions = d_token_positions + d_token_offsets[idx]; auto token_count = d_token_offsets[idx + 1] - d_token_offsets[idx]; int32_t bytes = 0; for (int token_index = (ngrams - 1); token_index < token_count; ++token_index) { int32_t length = 0; for (int n = (ngrams - 1); n >= 0; --n) // sliding window of tokens { position_pair item = token_positions[token_index - n]; length += item.second - item.first; // size of this token in bytes length += d_separator->size(); // add size of the separator } length -= d_separator->size(); // remove trailing separator bytes += length; } return bytes; } }; // Build the ngrams for each string. 
// The ngrams for each string are placed contiguously within the section of memory // assigned for the string. And an index_pair is recorded for each ngram. struct ngram_builder_fn { custring_view_array d_strings; custring_view* d_separator; int32_t ngrams; const int32_t* d_token_offsets; const position_pair* d_token_positions; const int32_t* d_ngram_offsets; const int32_t* d_chars_offsets; char* d_chars; // write ngram strings to here index_pair* d_indices; // output ngram index-pairs here __device__ void operator()(int32_t idx) { custring_view* dstr = d_strings[idx]; auto token_positions = d_token_positions + d_token_offsets[idx]; auto token_count = d_token_offsets[idx + 1] - d_token_offsets[idx]; int ngram_index = 0; char* out_ptr = d_chars + d_chars_offsets[idx]; auto indices = d_indices + d_ngram_offsets[idx]; for (int token_index = (ngrams - 1); token_index < token_count; ++token_index) { int32_t length = 0; auto ngram_ptr = out_ptr; for (int n = (ngrams - 1); n >= 0; --n) { position_pair item = token_positions[token_index - n]; out_ptr = copy_and_incr(out_ptr, dstr->data() + item.first, item.second - item.first); length += item.second - item.first; if (n > 0) { // copy separator (except for the last one) out_ptr = copy_and_incr(out_ptr, d_separator->data(), d_separator->size()); length += d_separator->size(); } } indices[ngram_index++] = index_pair{ngram_ptr, length}; } } }; // This will create ngrams for each string and not across strings. 
NVStrings* NVText::ngrams_tokenize(NVStrings const& strs, const char* delimiter, int32_t ngrams, const char* separator) { auto count = strs.size(); if (count == 0) return strs.copy(); if (ngrams == 1) return NVText::tokenize(strs, delimiter); if (ngrams == 0) // default is 2 ngrams = 2; if (!separator) // no separator specified separator = ""; auto execpol = rmm::exec_policy(0); rmm::device_vector<custring_view*> strings(count, nullptr); custring_view** d_strings = strings.data().get(); strs.create_custring_index(d_strings); // first remove any nulls or empty strings auto end = thrust::remove_if( execpol->on(0), d_strings, d_strings + count, [] __device__(custring_view * dstr) { return (dstr == nullptr) || dstr->empty(); }); count = (unsigned int)(end - d_strings); // new count custring_view* d_separator = custring_from_host(separator); custring_view* d_delimiter = custring_from_host(delimiter); // Example for comments with ngrams=2 // ["a bb ccc","dd e"] => ["a_bb", "bb_ccc", "dd_e"] // get the number of tokens per string // token-counts = [3,2]; token-offsets = [0,3,5] rmm::device_vector<int32_t> token_offsets(count + 1); auto d_token_offsets = token_offsets.data().get(); thrust::transform_inclusive_scan(execpol->on(0), strings.begin(), strings.end(), d_token_offsets + 1, string_token_counter_fn{d_strings, d_delimiter, d_separator}, thrust::plus<int32_t>()); CUDA_TRY(cudaMemset(d_token_offsets, 0, sizeof(int32_t))); int32_t total_tokens = token_offsets[count]; // 5 // get the token positions per string // => [(0,1),(2,4),(5,8), (0,2),(3,4)] rmm::device_vector<position_pair> token_positions(total_tokens); auto d_token_positions = token_positions.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int32_t>(0), count, string_tokens_positions_fn{ d_strings, d_delimiter, d_separator, d_token_offsets, d_token_positions}); // compute the number of ngrams per string // ngram-counts = [2,1]; ngram-offsets = [0,2,3] rmm::device_vector<int32_t> 
ngram_offsets(count + 1); auto d_ngram_offsets = ngram_offsets.data().get(); thrust::transform_inclusive_scan( execpol->on(0), thrust::make_counting_iterator<int32_t>(0), thrust::make_counting_iterator<int32_t>(count), d_ngram_offsets + 1, [d_token_offsets, ngrams] __device__(int32_t idx) { auto token_count = d_token_offsets[idx + 1] - d_token_offsets[idx]; return (token_count >= ngrams) ? token_count - ngrams + 1 : 0; }, thrust::plus<int32_t>()); CUDA_TRY(cudaMemset(d_ngram_offsets, 0, sizeof(int32_t))); int32_t total_ngrams = ngram_offsets[count]; // 3 // compute the size of the ngrams for each string // sizes = [10,4]; offsets = [0,10,14] // 2 bigrams in 1st string total to 10 bytes; 1 bigram in 2nd string is 4 bytes rmm::device_vector<int32_t> chars_offsets(count + 1); auto d_chars_offsets = chars_offsets.data().get(); thrust::transform_inclusive_scan(execpol->on(0), thrust::make_counting_iterator<int32_t>(0), thrust::make_counting_iterator<int32_t>(count), d_chars_offsets + 1, ngram_sizes_fn{d_separator, ngrams, d_token_offsets, d_token_positions, d_ngram_offsets}, //, d_ngram_sizes}, thrust::plus<int32_t>()); CUDA_TRY(cudaMemset(d_chars_offsets, 0, sizeof(int32_t))); // create output buffer size auto output_chars_size = chars_offsets[count]; // 14 rmm::device_vector<char> output_buffer(output_chars_size); auto d_output_buffer = output_buffer.data().get(); // build the ngrams in the output buffer rmm::device_vector<index_pair> results(total_ngrams); auto d_results = results.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<int32_t>(0), count, ngram_builder_fn{d_strings, d_separator, ngrams, d_token_offsets, d_token_positions, d_ngram_offsets, d_chars_offsets, d_output_buffer, d_results}); RMM_FREE(d_separator, 0); RMM_FREE(d_delimiter, 0); // build strings object from results indices return NVStrings::create_from_index((std::pair<const char*, size_t>*)d_results, total_ngrams); }
0f312389b03281d4e39b18ad65dd4cb73fea23b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/loss_layer.hpp" #include "caffe/layers/loc_loss_layer.hpp" namespace caffe { template <typename Dtype> __global__ void LocLossForwardGPU(const int nthreads, const Dtype* locs, Dtype threshold, Dtype* loss_array) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype loss = (Dtype)0; if(locs[index] < -threshold) { loss += (locs[index] + threshold) * (locs[index] + threshold) / 2; } else if(locs[index] > threshold) { loss += (locs[index] - threshold) * (locs[index] - threshold) / 2; } loss_array[index] = loss; } } template <typename Dtype> void LocLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { string prefix = "LocLossLayer::Forward_gpu::\t"; const Dtype* locs = bottom[0]->gpu_data(); Dtype* loss_array = loss_.mutable_gpu_data(); caffe_gpu_set(loss_.count(), (Dtype)0, loss_array); const int nthreads = N; hipLaunchKernelGGL(( LocLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, locs, threshold, loss_array); Dtype loss; caffe_gpu_asum(nthreads, loss_array, &loss); loss /= nthreads; top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void LocLossBackwardGPU(const int nthreads, const Dtype* locs, Dtype threshold, Dtype* dLocs) { CUDA_KERNEL_LOOP(index, nthreads) { if(locs[index] < -threshold) { dLocs[index] = locs[index] + threshold; } else if(locs[index] > threshold) { dLocs[index] = locs[index] - threshold; } } } template <typename Dtype> void LocLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* locs = bottom[0]->gpu_data(); Dtype* dloc = bottom[0]->mutable_gpu_diff(); const int nthreads = N; hipLaunchKernelGGL(( 
LocLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, locs, threshold, dloc); caffe_gpu_scal(bottom[0]->count(), top[0]->cpu_diff()[0] / nthreads, dloc); } INSTANTIATE_LAYER_GPU_FUNCS(LocLossLayer); } // namespace caffe
0f312389b03281d4e39b18ad65dd4cb73fea23b4.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/layers/loss_layer.hpp" #include "caffe/layers/loc_loss_layer.hpp" namespace caffe { template <typename Dtype> __global__ void LocLossForwardGPU(const int nthreads, const Dtype* locs, Dtype threshold, Dtype* loss_array) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype loss = (Dtype)0; if(locs[index] < -threshold) { loss += (locs[index] + threshold) * (locs[index] + threshold) / 2; } else if(locs[index] > threshold) { loss += (locs[index] - threshold) * (locs[index] - threshold) / 2; } loss_array[index] = loss; } } template <typename Dtype> void LocLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { string prefix = "LocLossLayer::Forward_gpu::\t"; const Dtype* locs = bottom[0]->gpu_data(); Dtype* loss_array = loss_.mutable_gpu_data(); caffe_gpu_set(loss_.count(), (Dtype)0, loss_array); const int nthreads = N; LocLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, locs, threshold, loss_array); Dtype loss; caffe_gpu_asum(nthreads, loss_array, &loss); loss /= nthreads; top[0]->mutable_cpu_data()[0] = loss; } template <typename Dtype> __global__ void LocLossBackwardGPU(const int nthreads, const Dtype* locs, Dtype threshold, Dtype* dLocs) { CUDA_KERNEL_LOOP(index, nthreads) { if(locs[index] < -threshold) { dLocs[index] = locs[index] + threshold; } else if(locs[index] > threshold) { dLocs[index] = locs[index] - threshold; } } } template <typename Dtype> void LocLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* locs = bottom[0]->gpu_data(); Dtype* dloc = bottom[0]->mutable_gpu_diff(); const int nthreads = N; LocLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, locs, threshold, dloc); caffe_gpu_scal(bottom[0]->count(), 
top[0]->cpu_diff()[0] / nthreads, dloc); } INSTANTIATE_LAYER_GPU_FUNCS(LocLossLayer); } // namespace caffe
1c7da7ca9466d804ecbf969be9f1330cf117e90f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common.hpp> #include <layers/slice_layer.hpp> #include <utils.cuh> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { template <size_t length, typename T> __device__ int array_length(T (&arr)[length]) { return length; } template <typename T, typename... Args> __global__ void slice_kernel(bool forward, T* in, const int h, const int in_w, const int virt_w, const Args... 
args) { const typename SliceLayer<T>::OutParam out_params[] = {args...}; const int n_outs = array_length(out_params); for (int row = blockIdx.x; row < h; row += gridDim.x) { for (int k = 0; k < n_outs; k++) { int st = out_params[k].st; int ed = out_params[k].ed; int out_w = ed - st; for (int out_col = threadIdx.x; out_col < out_w; out_col += blockDim.x) { int in_col = out_col + st; int in_idx = row * in_w + in_col; int out_idx = row * out_w + out_col; T* out = out_params[k].out; if (forward) { out[out_idx] = in[in_idx]; } else { in[in_idx] += out[out_idx]; } } __syncthreads(); } } } } // anonymous namespace template <typename T> SliceLayer<T>::SliceLayer(const Tensor2<T>& in_tensor, Tensors2<T>& out_tensors, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, std::vector<std::pair<int, int>>& ranges, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource), virt_w_(0) { try { if (ranges.empty()) { CK_THROW_(Error_t::WrongInput, "Empty slice ranges is not allowed"); } if (!out_tensors.empty()) { CK_THROW_(Error_t::WrongInput, "output tensor vector must be empty"); } auto in_dims = in_tensor.get_dimensions(); if (in_dims.size() != 2) { CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be concatenated"); } size_t height = in_dims[0]; int in_w = in_dims[1]; int prev_min = -1; int prev_max = 0; for (auto& range : ranges) { int cur_min = range.first; int cur_max = range.second; if (cur_min >= cur_max) { CK_THROW_(Error_t::WrongInput, "Reverse range is not allowed"); } if (cur_min < 0 || cur_max < 0) { CK_THROW_(Error_t::WrongInput, "Negative ranges cannot be allowed"); } if (!(prev_min <= cur_min && prev_max <= cur_max)) { CK_THROW_(Error_t::WrongInput, "A range cannot be out-order nor included in another"); } if (cur_min >= in_w || cur_max > in_w) { CK_THROW_(Error_t::WrongInput, "Ranges cannot be bigger than the input width"); } size_t out_w = cur_max - cur_min; std::vector<size_t> out_dims = {height, out_w}; { Tensor2<T> tensor; 
blobs_buff->reserve(out_dims, &tensor); out_tensors.push_back(tensor); } sts_.push_back(cur_min); virt_w_ += out_w; prev_min = cur_min; prev_max = cur_max; } in_tensors_.push_back(in_tensor); for (auto& out_tensor : out_tensors) { out_tensors_.push_back(out_tensor); } } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void SliceLayer<T>::fprop(bool is_train) { prop_common(true, is_train, get_gpu().get_stream()); } template <typename T> void SliceLayer<T>::bprop() { prop_common(false, true, get_gpu().get_stream()); } template <typename T> void SliceLayer<T>::prop_common(bool forward, bool is_train, hipStream_t stream) { CudaDeviceContext context(get_device_id()); int n_out_tensors = out_tensors_.size(); if (n_out_tensors == 2) { std::vector<OutParam> out_params = set_out_params(2); kernel_launch(forward, is_train, stream, out_params[0], out_params[1]); } else if (n_out_tensors == 3) { std::vector<OutParam> out_params = set_out_params(3); kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2]); } else if (n_out_tensors == 4) { std::vector<OutParam> out_params = set_out_params(4); kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2], out_params[3]); } else if (n_out_tensors == 5) { std::vector<OutParam> out_params = set_out_params(5); kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2], out_params[3], out_params[4]); } else { CK_THROW_(Error_t::UnSupportedFormat, "Slicing into > 5 layers is not supported"); } #ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } template <typename T> std::vector<typename SliceLayer<T>::OutParam> SliceLayer<T>::set_out_params(int n) { std::vector<OutParam> out_params; for (int i = 0; i < n; i++) { Tensor2<T>& out_tensor = out_tensors_[i]; T* out = out_tensor.get_ptr(); int st = sts_[i]; int w = out_tensor.get_dimensions()[1]; 
out_params.push_back({out, st, st + w}); } return out_params; } template <typename T> template <typename... Args> void SliceLayer<T>::kernel_launch(bool forward, bool is_train, hipStream_t stream, Args&... args) { int block_size = 512; int n_blocks = get_gpu().get_sm_count() * 4; Tensor2<T>& in_tensor = get_in_tensors(is_train)[0]; T* in = in_tensor.get_ptr(); int h = in_tensor.get_dimensions()[0]; int in_w = in_tensor.get_dimensions()[1]; if (!forward) { hipLaunchKernelGGL(( initialize_array), dim3(n_blocks), dim3(block_size), 0, stream, in, h * in_w, T(0)); } hipLaunchKernelGGL(( slice_kernel), dim3(n_blocks), dim3(block_size), 0, stream, forward, in, h, in_w, virt_w_, args...); } template class SliceLayer<float>; template class SliceLayer<__half>; } // namespace HugeCTR
1c7da7ca9466d804ecbf969be9f1330cf117e90f.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common.hpp> #include <layers/slice_layer.hpp> #include <utils.cuh> #include <utils.hpp> #ifndef NDEBUG #include <iostream> #endif namespace HugeCTR { namespace { template <size_t length, typename T> __device__ int array_length(T (&arr)[length]) { return length; } template <typename T, typename... Args> __global__ void slice_kernel(bool forward, T* in, const int h, const int in_w, const int virt_w, const Args... 
args) { const typename SliceLayer<T>::OutParam out_params[] = {args...}; const int n_outs = array_length(out_params); for (int row = blockIdx.x; row < h; row += gridDim.x) { for (int k = 0; k < n_outs; k++) { int st = out_params[k].st; int ed = out_params[k].ed; int out_w = ed - st; for (int out_col = threadIdx.x; out_col < out_w; out_col += blockDim.x) { int in_col = out_col + st; int in_idx = row * in_w + in_col; int out_idx = row * out_w + out_col; T* out = out_params[k].out; if (forward) { out[out_idx] = in[in_idx]; } else { in[in_idx] += out[out_idx]; } } __syncthreads(); } } } } // anonymous namespace template <typename T> SliceLayer<T>::SliceLayer(const Tensor2<T>& in_tensor, Tensors2<T>& out_tensors, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, std::vector<std::pair<int, int>>& ranges, const std::shared_ptr<GPUResource>& gpu_resource) : Layer(gpu_resource), virt_w_(0) { try { if (ranges.empty()) { CK_THROW_(Error_t::WrongInput, "Empty slice ranges is not allowed"); } if (!out_tensors.empty()) { CK_THROW_(Error_t::WrongInput, "output tensor vector must be empty"); } auto in_dims = in_tensor.get_dimensions(); if (in_dims.size() != 2) { CK_THROW_(Error_t::WrongInput, "Only 2D tensors can be concatenated"); } size_t height = in_dims[0]; int in_w = in_dims[1]; int prev_min = -1; int prev_max = 0; for (auto& range : ranges) { int cur_min = range.first; int cur_max = range.second; if (cur_min >= cur_max) { CK_THROW_(Error_t::WrongInput, "Reverse range is not allowed"); } if (cur_min < 0 || cur_max < 0) { CK_THROW_(Error_t::WrongInput, "Negative ranges cannot be allowed"); } if (!(prev_min <= cur_min && prev_max <= cur_max)) { CK_THROW_(Error_t::WrongInput, "A range cannot be out-order nor included in another"); } if (cur_min >= in_w || cur_max > in_w) { CK_THROW_(Error_t::WrongInput, "Ranges cannot be bigger than the input width"); } size_t out_w = cur_max - cur_min; std::vector<size_t> out_dims = {height, out_w}; { Tensor2<T> tensor; 
blobs_buff->reserve(out_dims, &tensor); out_tensors.push_back(tensor); } sts_.push_back(cur_min); virt_w_ += out_w; prev_min = cur_min; prev_max = cur_max; } in_tensors_.push_back(in_tensor); for (auto& out_tensor : out_tensors) { out_tensors_.push_back(out_tensor); } } catch (const std::runtime_error& rt_err) { std::cerr << rt_err.what() << std::endl; throw; } } template <typename T> void SliceLayer<T>::fprop(bool is_train) { prop_common(true, is_train, get_gpu().get_stream()); } template <typename T> void SliceLayer<T>::bprop() { prop_common(false, true, get_gpu().get_stream()); } template <typename T> void SliceLayer<T>::prop_common(bool forward, bool is_train, cudaStream_t stream) { CudaDeviceContext context(get_device_id()); int n_out_tensors = out_tensors_.size(); if (n_out_tensors == 2) { std::vector<OutParam> out_params = set_out_params(2); kernel_launch(forward, is_train, stream, out_params[0], out_params[1]); } else if (n_out_tensors == 3) { std::vector<OutParam> out_params = set_out_params(3); kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2]); } else if (n_out_tensors == 4) { std::vector<OutParam> out_params = set_out_params(4); kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2], out_params[3]); } else if (n_out_tensors == 5) { std::vector<OutParam> out_params = set_out_params(5); kernel_launch(forward, is_train, stream, out_params[0], out_params[1], out_params[2], out_params[3], out_params[4]); } else { CK_THROW_(Error_t::UnSupportedFormat, "Slicing into > 5 layers is not supported"); } #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } template <typename T> std::vector<typename SliceLayer<T>::OutParam> SliceLayer<T>::set_out_params(int n) { std::vector<OutParam> out_params; for (int i = 0; i < n; i++) { Tensor2<T>& out_tensor = out_tensors_[i]; T* out = out_tensor.get_ptr(); int st = sts_[i]; int w = out_tensor.get_dimensions()[1]; 
out_params.push_back({out, st, st + w}); } return out_params; } template <typename T> template <typename... Args> void SliceLayer<T>::kernel_launch(bool forward, bool is_train, cudaStream_t stream, Args&... args) { int block_size = 512; int n_blocks = get_gpu().get_sm_count() * 4; Tensor2<T>& in_tensor = get_in_tensors(is_train)[0]; T* in = in_tensor.get_ptr(); int h = in_tensor.get_dimensions()[0]; int in_w = in_tensor.get_dimensions()[1]; if (!forward) { initialize_array<<<n_blocks, block_size, 0, stream>>>(in, h * in_w, T(0)); } slice_kernel<<<n_blocks, block_size, 0, stream>>>(forward, in, h, in_w, virt_w_, args...); } template class SliceLayer<float>; template class SliceLayer<__half>; } // namespace HugeCTR
6f058863282f63e773699aac3bd385e156538360.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Prepare_1_MeansForJoin.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Candidate launch geometries (block shapes) and square matrix sizes swept
// by the benchmark below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Launch-overhead benchmark: for argv[1] matrix sizes and all 20 block
// shapes, launch Prepare_1_MeansForJoin 1000 times back-to-back and report
// the elapsed host time in microseconds.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);

    for (int mi = 0; mi < matrix_len; mi++) {
        for (int bi = 0; bi < 20; bi++) {
            int XSIZE = matrices_[mi][0];
            int YSIZE = matrices_[mi][1];
            int BLOCKX = blocks_[bi][0];
            int BLOCKY = blocks_[bi][1];

            // Dummy device buffers for the kernel arguments.
            float *input = NULL;
            hipMalloc(&input, XSIZE*YSIZE);
            int c_src1 = 1;
            int c_src2 = 1;
            int c_n = 1;
            float *delta = NULL;
            hipMalloc(&delta, XSIZE*YSIZE);
            int imageWidth = XSIZE;
            int imageHeight = YSIZE;

            // Round each dimension up to a multiple of the block size so the
            // grid covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            hipFree(0);  // forces context creation before timing starts

            // One cold launch (synchronised), then a short warm-up burst.
            hipLaunchKernelGGL(Prepare_1_MeansForJoin, dim3(gridBlock), dim3(threadBlock), 0, 0,
                               input, c_src1, c_src2, c_n, delta, imageWidth, imageHeight);
            hipDeviceSynchronize();
            for (int w = 0; w < 10; ++w) {
                hipLaunchKernelGGL(Prepare_1_MeansForJoin, dim3(gridBlock), dim3(threadBlock), 0, 0,
                                   input, c_src1, c_src2, c_n, delta, imageWidth, imageHeight);
            }

            // Timed region: 1000 asynchronous launches.
            auto start = steady_clock::now();
            for (int it = 0; it < 1000; it++) {
                hipLaunchKernelGGL(Prepare_1_MeansForJoin, dim3(gridBlock), dim3(threadBlock), 0, 0,
                                   input, c_src1, c_src2, c_n, delta, imageWidth, imageHeight);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
6f058863282f63e773699aac3bd385e156538360.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Prepare_1_MeansForJoin.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

// Candidate launch geometries (block shapes) and square matrix sizes swept
// by the benchmark below.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Launch-overhead benchmark: for argv[1] matrix sizes and all 20 block
// shapes, launch Prepare_1_MeansForJoin 1000 times back-to-back and report
// the elapsed host time in microseconds.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);

    for (int mi = 0; mi < matrix_len; mi++) {
        for (int bi = 0; bi < 20; bi++) {
            int XSIZE = matrices_[mi][0];
            int YSIZE = matrices_[mi][1];
            int BLOCKX = blocks_[bi][0];
            int BLOCKY = blocks_[bi][1];

            // Dummy device buffers for the kernel arguments.
            float *input = NULL;
            cudaMalloc(&input, XSIZE*YSIZE);
            int c_src1 = 1;
            int c_src2 = 1;
            int c_n = 1;
            float *delta = NULL;
            cudaMalloc(&delta, XSIZE*YSIZE);
            int imageWidth = XSIZE;
            int imageHeight = YSIZE;

            // Round each dimension up to a multiple of the block size so the
            // grid covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);

            cudaFree(0);  // forces context creation before timing starts

            // One cold launch (synchronised), then a short warm-up burst.
            Prepare_1_MeansForJoin<<<gridBlock,threadBlock>>>(input, c_src1, c_src2, c_n, delta, imageWidth, imageHeight);
            cudaDeviceSynchronize();
            for (int w = 0; w < 10; ++w) {
                Prepare_1_MeansForJoin<<<gridBlock,threadBlock>>>(input, c_src1, c_src2, c_n, delta, imageWidth, imageHeight);
            }

            // Timed region: 1000 asynchronous launches.
            auto start = steady_clock::now();
            for (int it = 0; it < 1000; it++) {
                Prepare_1_MeansForJoin<<<gridBlock,threadBlock>>>(input, c_src1, c_src2, c_n, delta, imageWidth, imageHeight);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
fdf3fc5189a45494061c3de540e4adc3325eaf21.hip
// !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include <cmath>
#include <hip/hip_runtime.h>
using namespace std;

#define ll long long int

const int Block_Size = 1024;

// Block-wise in-place inclusive (Hillis-Steele) scan.
// d_in:  input padded to a multiple of Block_Size, scanned in place per block.
// d_out: receives each block's total (the last scanned element of the block).
__global__ void Inclusive_Scan(ll *d_in, ll* d_out)
{
    __shared__ ll sh_array[Block_Size];

    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    int bid = blockIdx.x;

    // Stage this block's slice in shared memory.
    sh_array[tid] = d_in[id];
    __syncthreads();

    // Hillis-Steele: in step s, each element adds the value s places to its
    // left. BUG FIX: the original put __syncthreads() inside the
    // "if (tid >= step)" branch -- a divergent barrier, which is undefined
    // behaviour. The read and the write are now separated by block-wide
    // barriers reached by every thread.
    for (int step = 1; step < Block_Size; step *= 2)
    {
        ll temp = 0;
        if (tid >= step)
            temp = sh_array[tid - step];
        __syncthreads();
        if (tid >= step)
            sh_array[tid] += temp;
        __syncthreads();
    }

    // Publish the block-local scan result.
    d_in[id] = sh_array[tid];

    // The last thread records the block total for the second-level scan.
    if (tid == (Block_Size - 1))
        d_out[bid] = d_in[id];
}

// Adds the scanned block totals: every element of block b (b > 0) receives
// the inclusive sum of all preceding blocks, d_out[b-1].
__global__ void Add(ll* d_in, ll* d_out)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int bid = blockIdx.x;
    if (bid > 0)
        d_in[id] += d_out[bid - 1];
}

int main()
{
    ll *h_in, *h_scan;
    int Size;
    cout << "Enter size of the array.\n";
    cin >> Size;

    int Reduced_Size = (int)ceil(1.0 * Size / Block_Size);  // number of blocks
    int Array_Bytes = Size * sizeof(ll);
    int Reduced_Array_Bytes = Reduced_Size * sizeof(ll);

    h_in = (ll*)malloc(Array_Bytes);
    h_scan = (ll*)malloc(Array_Bytes);

    // Populate the input with small random numbers.
    srand(time(0));
    for (ll i = 0; i < Size; i++)
    {
        h_in[i] = rand() % 10;
    }

    ll *d_in, *d_out, *d_sum;
    // d_in is padded to a multiple of Block_Size so the kernel needs no
    // special case for the last block.
    hipMalloc((void**)&d_in, Reduced_Size * Block_Size * sizeof(ll));
    hipMalloc((void**)&d_out, Reduced_Array_Bytes);
    hipMalloc((void**)&d_sum, sizeof(ll));

    // BUG FIX: zero the buffer first so the tail-block padding is defined
    // instead of reading uninitialised device memory.
    hipMemset(d_in, 0, Reduced_Size * Block_Size * sizeof(ll));
    hipMemcpy(d_in, h_in, Array_Bytes, hipMemcpyHostToDevice);

    hipLaunchKernelGGL((Inclusive_Scan), dim3(Reduced_Size), dim3(Block_Size), 0, 0, d_in, d_out);

    // Second-level scan of the block sums, then per-block offset fixup.
    // Only needed when more than one block was launched.
    // NOTE(review): this assumes Reduced_Size <= Block_Size, i.e. at most
    // Block_Size^2 elements -- confirm intended input range.
    if (Size > Block_Size)
    {
        hipLaunchKernelGGL((Inclusive_Scan), dim3(1), dim3(Block_Size), 0, 0, d_out, d_sum);
        hipLaunchKernelGGL((Add), dim3(Reduced_Size), dim3(Block_Size), 0, 0, d_in, d_out);
    }

    hipMemcpy(h_scan, d_in, Array_Bytes, hipMemcpyDeviceToHost);

    hipFree(d_in);
    hipFree(d_out);
    hipFree(d_sum);  // BUG FIX: was leaked

    // CPU reference prefix sum.
    ll *pref = (ll*)malloc(Array_Bytes);
    pref[0] = h_in[0];
    for (ll i = 1; i < Size; i++)
        pref[i] = pref[i - 1] + h_in[i];

    // Compare CPU and GPU results element by element.
    ll flag = 0;
    for (ll i = 0; i < Size; i++)
    {
        if (h_scan[i] != pref[i]) { flag = 1; break; }
    }
    if (flag == 0)
        cout << "Result computed correctly!\n";
    else
        cout << "Result wrong!\n";

    // BUG FIX: host buffers were leaked.
    free(h_in);
    free(h_scan);
    free(pref);
    return 0;
}
fdf3fc5189a45494061c3de540e4adc3325eaf21.cu
#include<iostream>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include <cmath>
#include <cuda.h>
using namespace std;

#define ll long long int

const int Block_Size = 1024;

// Block-wise in-place inclusive (Hillis-Steele) scan.
// d_in:  input padded to a multiple of Block_Size, scanned in place per block.
// d_out: receives each block's total (the last scanned element of the block).
__global__ void Inclusive_Scan(ll *d_in, ll* d_out)
{
    __shared__ ll sh_array[Block_Size];

    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    int bid = blockIdx.x;

    // Stage this block's slice in shared memory.
    sh_array[tid] = d_in[id];
    __syncthreads();

    // Hillis-Steele: in step s, each element adds the value s places to its
    // left. BUG FIX: the original put __syncthreads() inside the
    // "if (tid >= step)" branch -- a divergent barrier, which is undefined
    // behaviour. The read and the write are now separated by block-wide
    // barriers reached by every thread.
    for (int step = 1; step < Block_Size; step *= 2)
    {
        ll temp = 0;
        if (tid >= step)
            temp = sh_array[tid - step];
        __syncthreads();
        if (tid >= step)
            sh_array[tid] += temp;
        __syncthreads();
    }

    // Publish the block-local scan result.
    d_in[id] = sh_array[tid];

    // The last thread records the block total for the second-level scan.
    if (tid == (Block_Size - 1))
        d_out[bid] = d_in[id];
}

// Adds the scanned block totals: every element of block b (b > 0) receives
// the inclusive sum of all preceding blocks, d_out[b-1].
__global__ void Add(ll* d_in, ll* d_out)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int bid = blockIdx.x;
    if (bid > 0)
        d_in[id] += d_out[bid - 1];
}

int main()
{
    ll *h_in, *h_scan;
    int Size;
    cout << "Enter size of the array.\n";
    cin >> Size;

    int Reduced_Size = (int)ceil(1.0 * Size / Block_Size);  // number of blocks
    int Array_Bytes = Size * sizeof(ll);
    int Reduced_Array_Bytes = Reduced_Size * sizeof(ll);

    h_in = (ll*)malloc(Array_Bytes);
    h_scan = (ll*)malloc(Array_Bytes);

    // Populate the input with small random numbers.
    srand(time(0));
    for (ll i = 0; i < Size; i++)
    {
        h_in[i] = rand() % 10;
    }

    ll *d_in, *d_out, *d_sum;
    // d_in is padded to a multiple of Block_Size so the kernel needs no
    // special case for the last block.
    cudaMalloc((void**)&d_in, Reduced_Size * Block_Size * sizeof(ll));
    cudaMalloc((void**)&d_out, Reduced_Array_Bytes);
    cudaMalloc((void**)&d_sum, sizeof(ll));

    // BUG FIX: zero the buffer first so the tail-block padding is defined
    // instead of reading uninitialised device memory.
    cudaMemset(d_in, 0, Reduced_Size * Block_Size * sizeof(ll));
    cudaMemcpy(d_in, h_in, Array_Bytes, cudaMemcpyHostToDevice);

    Inclusive_Scan <<< Reduced_Size, Block_Size >>> (d_in, d_out);

    // Second-level scan of the block sums, then per-block offset fixup.
    // Only needed when more than one block was launched.
    // NOTE(review): this assumes Reduced_Size <= Block_Size, i.e. at most
    // Block_Size^2 elements -- confirm intended input range.
    if (Size > Block_Size)
    {
        Inclusive_Scan <<< 1, Block_Size >>> (d_out, d_sum);
        Add <<< Reduced_Size, Block_Size >>> (d_in, d_out);
    }

    cudaMemcpy(h_scan, d_in, Array_Bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_sum);  // BUG FIX: was leaked

    // CPU reference prefix sum.
    ll *pref = (ll*)malloc(Array_Bytes);
    pref[0] = h_in[0];
    for (ll i = 1; i < Size; i++)
        pref[i] = pref[i - 1] + h_in[i];

    // Compare CPU and GPU results element by element.
    ll flag = 0;
    for (ll i = 0; i < Size; i++)
    {
        if (h_scan[i] != pref[i]) { flag = 1; break; }
    }
    if (flag == 0)
        cout << "Result computed correctly!\n";
    else
        cout << "Result wrong!\n";

    // BUG FIX: host buffers were leaked.
    free(h_in);
    free(h_scan);
    free(pref);
    return 0;
}
2345098f79059f59afd7d0cede63ecc88db6df41.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// In-place index rewrite: idx[i] <- (last - idx[i]) * s1.
// Reverses each index against `last`; s1 is presumably the stride of the
// dimension being reversed -- confirm with the caller.
// Walks all n entries with a grid-stride pattern, so any grid/block
// configuration is valid.
__global__ void cumo_na_index_aref_naview_index_stride_last_kernel(size_t *idx, ssize_t s1, size_t last, uint64_t n)
{
    uint64_t i = blockIdx.x * blockDim.x + threadIdx.x;
    const uint64_t step = blockDim.x * gridDim.x;
    while (i < n) {
        idx[i] = (last - idx[i]) * s1;
        i += step;
    }
}
2345098f79059f59afd7d0cede63ecc88db6df41.cu
#include "includes.h"

// In-place index rewrite: idx[i] <- (last - idx[i]) * s1.
// Reverses each index against `last`; s1 is presumably the stride of the
// dimension being reversed -- confirm with the caller.
// Walks all n entries with a grid-stride pattern, so any grid/block
// configuration is valid.
__global__ void cumo_na_index_aref_naview_index_stride_last_kernel(size_t *idx, ssize_t s1, size_t last, uint64_t n)
{
    uint64_t i = blockIdx.x * blockDim.x + threadIdx.x;
    const uint64_t step = blockDim.x * gridDim.x;
    while (i < n) {
        idx[i] = (last - idx[i]) * s1;
        i += step;
    }
}
075288fd9dc3bfc84add4968d6a5f92e21147bf3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SDSC SCC Training - GPU Computing and Programming
// Jan 24, 2020
// Andreas Goetz (agoetz@sdsc.edu)
//
// CUDA program to add two vectors in parallel on the GPU
// version 2: launch a fixed number of blocks and threads

#include <stdio.h>

// vector length, number of blocks NBL and threads per block TPB
#define N (255*4096)
#define NBL 256
#define TPB 128

// Element-wise c = a + b over n entries. The fixed-size grid covers any n
// by striding each thread across the vector.
__global__ void add(int *a, int *b, int *c, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = tid; i < n; i += stride)
        c[i] = a[i] + b[i];
}

// Driver: build two vectors, add them on the GPU, and verify every element.
// Returns the number of mismatching elements (0 on success).
int main(void) {
    int size = N * sizeof(int);

    // host buffers
    int *h_a = (int *) malloc(size);
    int *h_b = (int *) malloc(size);
    int *h_c = (int *) malloc(size);

    // device buffers
    int *d_a, *d_b, *d_c;
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);

    // initialise both inputs to 1, 2, ..., N
    for (int i = 0; i < N; i++) {
        h_a[i] = i + 1;
        h_b[i] = i + 1;
    }

    // stage inputs on the device
    hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);

    printf("\nLaunching vector addition kernel...\n");
    printf("Vector length = %d\n",N);
    printf("Blocks = %d\n",NBL);
    printf("Threads per block = %d\n",TPB);
    printf("Kernel copies = %d\n",NBL*TPB);
    hipLaunchKernelGGL((add), dim3(NBL), dim3(TPB), 0, 0, d_a, d_b, d_c, N);

    // blocking copy of the result back to the host (implicitly synchronises)
    hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    // verify: every element must equal 2*(i+1)
    int err = 0;
    for (int i = 0; i < N; i++)
        if (h_c[i] != 2*(i+1)) err += 1;

    if (err != 0) {
        printf("\n Error, %d elements do not match!\n\n", err);
    } else {
        printf("\n Success! All elements match.\n\n");
    }

    free(h_a);
    free(h_b);
    free(h_c);
    return err;
}
075288fd9dc3bfc84add4968d6a5f92e21147bf3.cu
// SDSC SCC Training - GPU Computing and Programming
// Jan 24, 2020
// Andreas Goetz (agoetz@sdsc.edu)
//
// CUDA program to add two vectors in parallel on the GPU
// version 2: launch a fixed number of blocks and threads

#include <stdio.h>

// vector length, number of blocks NBL and threads per block TPB
#define N (255*4096)
#define NBL 256
#define TPB 128

// Element-wise c = a + b over n entries. The fixed-size grid covers any n
// by striding each thread across the vector.
__global__ void add(int *a, int *b, int *c, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = tid; i < n; i += stride)
        c[i] = a[i] + b[i];
}

// Driver: build two vectors, add them on the GPU, and verify every element.
// Returns the number of mismatching elements (0 on success).
int main(void) {
    int size = N * sizeof(int);

    // host buffers
    int *h_a = (int *) malloc(size);
    int *h_b = (int *) malloc(size);
    int *h_c = (int *) malloc(size);

    // device buffers
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // initialise both inputs to 1, 2, ..., N
    for (int i = 0; i < N; i++) {
        h_a[i] = i + 1;
        h_b[i] = i + 1;
    }

    // stage inputs on the device
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);

    printf("\nLaunching vector addition kernel...\n");
    printf("Vector length = %d\n",N);
    printf("Blocks = %d\n",NBL);
    printf("Threads per block = %d\n",TPB);
    printf("Kernel copies = %d\n",NBL*TPB);
    add<<<NBL,TPB>>>(d_a, d_b, d_c, N);

    // blocking copy of the result back to the host (implicitly synchronises)
    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // verify: every element must equal 2*(i+1)
    int err = 0;
    for (int i = 0; i < N; i++)
        if (h_c[i] != 2*(i+1)) err += 1;

    if (err != 0) {
        printf("\n Error, %d elements do not match!\n\n", err);
    } else {
        printf("\n Success! All elements match.\n\n");
    }

    free(h_a);
    free(h_b);
    free(h_c);
    return err;
}
5b44a96db54569d8b38d5ef585e3b0e23f0d9c91.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Logistic activation: 1 / (1 + e^-x).
__device__ float sigmoid(float x)
{
    return 1 / (1 + expf(-x));
}

// Evaluates a topologically-ordered feed-forward network inside one block.
// Dynamic shared memory layout: dim floats (neuron states) followed by dim
// bools (per-neuron "ready" flags).
// NOTE(review): the caller must size the dynamic shared allocation as at
// least dim*sizeof(float) + dim*sizeof(bool) -- confirm at the launch site.
// topology holds 3 ints per non-input neuron: [leftBorder, rightBorder,
// weightsStart]; the neuron consumes states[leftBorder..rightBorder) with
// weights starting at weights[weightsStart].
extern "C" __global__ void produceState2(const float* arguments, const int argsSize, const float* weights, const int* topology, const int topSize, float* outStates)
{
    const int tid = threadIdx.x;
    const int dim = argsSize + topSize;   // total neurons including inputs
    extern __shared__ float s[];
    float* states = s;                    // first dim floats
    bool* ready = (bool*)&states[dim];    // then dim ready flags
    __shared__ int counter[1];            // size of the finished prefix

    // Clear all ready flags (strided so any blockDim.x works).
    int r = tid;
    while(r < dim)
    {
        ready[r] = false;
        r += blockDim.x;
    }
    if (tid == 0)
    {
        counter[tid] = argsSize;          // tid is 0 here, so this is counter[0]
    }
    // Input neurons are ready immediately: copy the arguments in.
    if (tid < argsSize)
    {
        states[tid] = arguments[tid];
        ready[tid] = true;
    }
    __syncthreads();

    // Wavefront evaluation: each pass lets thread tid attempt the neuron at
    // offset tid past the finished prefix [0, counter[0]). counter[0] is
    // only modified by thread 0 between barriers, so the loop condition is
    // uniform across the block and the barriers below are non-divergent.
    while(counter[0] < dim)
    {
        const int index = counter[0] + tid;   // candidate neuron for this thread
        const int topIndex = index - argsSize;
        if (topIndex < topSize)
        {
            const int leftBorder = topology[topIndex*3];
            const int rightBorder = topology[topIndex*3 + 1];
            const int weightsStart = topology[topIndex*3 + 2];
            // Computable only if every input lies inside the finished prefix.
            if (rightBorder <= counter[0])
            {
                float sum = 0;
                for (int i = leftBorder; i < rightBorder; i++)
                {
                    sum += states[i] * weights[weightsStart + i - leftBorder];
                }
                states[index] = sigmoid(sum);
                ready[index] = true;
            }
        }
        __syncthreads();
        // Thread 0 grows the finished prefix by the number of neurons in the
        // current window that became ready.
        // NOTE(review): this counts every ready flag in the window rather
        // than the contiguous ready prefix; if a neuron could stay unready
        // while a later one in the same window completes, the prefix
        // invariant would break -- confirm the topology ordering makes that
        // impossible.
        if (tid == 0)
        {
            int total = counter[0];
            for (int i = total; i < total + blockDim.x && i < dim; i++)
            {
                if (ready[i])
                {
                    counter[0]++;
                }
            }
        }
        __syncthreads();
    }

    // Copy the final states out to global memory.
    int n = tid;
    while(n < dim)
    {
        outStates[n] = states[n];
        n += blockDim.x;
    }
}

// Alternative scheduler: thread t owns neurons t, t+blockDim.x,
// t+2*blockDim.x, ... and computes them in order as their inputs appear.
// Dynamic shared layout: dim floats (states) followed by per-thread
// iteration counters.
// NOTE(review): iters is indexed by tid in [0, blockDim.x), so the shared
// allocation must cover dim*sizeof(float) + blockDim.x*sizeof(int) --
// confirm at the launch site.
extern "C" __global__ void produceState3(const float* arguments, const int argsSize, const float* weights, const int* topology, const int topSize, float* outStates)
{
    const int tid = threadIdx.x;
    const int dim = argsSize + topSize;
    extern __shared__ float s[];
    float* states = s;
    int* iters = (int*)&states[dim];   // iters[t] = neurons thread t has finished

    // Each thread's first owned neuron: inputs count as already done.
    if (tid < argsSize)
    {
        states[tid] = arguments[tid];
        iters[tid] = 1;
    }
    else
    {
        iters[tid] = 0;
    }
    __syncthreads();

    // NOTE(review): the loop condition is per-thread while __syncthreads()
    // sits inside the loop (and once more after it). When dim is not a
    // multiple of blockDim.x, threads exit after different iteration counts,
    // making the barrier divergent -- undefined behaviour / possible hang.
    // Verify the launch parameters guarantee a uniform trip count, or
    // restructure.
    while(iters[tid] * blockDim.x + tid < dim)
    {
        const int index = iters[tid] * blockDim.x + tid;  // next neuron owned by this thread
        const int topIndex = index - argsSize;
        const int leftBorder = topology[topIndex*3];
        const int rightBorder = topology[topIndex*3 + 1];
        const int weightsStart = topology[topIndex*3 + 2];

        // The neuron may start once every input has been produced: input i
        // is the (i / blockDim.x + 1)-th neuron of thread i % blockDim.x.
        bool canStart = true;
        for (int i = leftBorder; i < rightBorder; i++)
        {
            int threadId = i % blockDim.x;
            int mustCounted = i / blockDim.x + 1;
            if (iters[threadId] < mustCounted)
            {
                canStart = false;
                break;
            }
        }
        if (canStart)
        {
            float sum = 0;
            for (int i = leftBorder; i < rightBorder; i++)
            {
                sum += states[i] * weights[weightsStart + i - leftBorder];
            }
            states[index] = sigmoid(sum);
            iters[tid]++;
        }
        __syncthreads();
    }
    __syncthreads();

    // Write all states back to global memory.
    int n = tid;
    while(n < dim)
    {
        outStates[n] = states[n];
        n += blockDim.x;
    }
}
5b44a96db54569d8b38d5ef585e3b0e23f0d9c91.cu
// Logistic activation: 1 / (1 + e^-x).
__device__ float sigmoid(float x)
{
    return 1 / (1 + expf(-x));
}

// Evaluates a topologically-ordered feed-forward network inside one block.
// Dynamic shared memory layout: dim floats (neuron states) followed by dim
// bools (per-neuron "ready" flags).
// NOTE(review): the caller must size the dynamic shared allocation as at
// least dim*sizeof(float) + dim*sizeof(bool) -- confirm at the launch site.
// topology holds 3 ints per non-input neuron: [leftBorder, rightBorder,
// weightsStart]; the neuron consumes states[leftBorder..rightBorder) with
// weights starting at weights[weightsStart].
extern "C" __global__ void produceState2(const float* arguments, const int argsSize, const float* weights, const int* topology, const int topSize, float* outStates)
{
    const int tid = threadIdx.x;
    const int dim = argsSize + topSize;   // total neurons including inputs
    extern __shared__ float s[];
    float* states = s;                    // first dim floats
    bool* ready = (bool*)&states[dim];    // then dim ready flags
    __shared__ int counter[1];            // size of the finished prefix

    // Clear all ready flags (strided so any blockDim.x works).
    int r = tid;
    while(r < dim)
    {
        ready[r] = false;
        r += blockDim.x;
    }
    if (tid == 0)
    {
        counter[tid] = argsSize;          // tid is 0 here, so this is counter[0]
    }
    // Input neurons are ready immediately: copy the arguments in.
    if (tid < argsSize)
    {
        states[tid] = arguments[tid];
        ready[tid] = true;
    }
    __syncthreads();

    // Wavefront evaluation: each pass lets thread tid attempt the neuron at
    // offset tid past the finished prefix [0, counter[0]). counter[0] is
    // only modified by thread 0 between barriers, so the loop condition is
    // uniform across the block and the barriers below are non-divergent.
    while(counter[0] < dim)
    {
        const int index = counter[0] + tid;   // candidate neuron for this thread
        const int topIndex = index - argsSize;
        if (topIndex < topSize)
        {
            const int leftBorder = topology[topIndex*3];
            const int rightBorder = topology[topIndex*3 + 1];
            const int weightsStart = topology[topIndex*3 + 2];
            // Computable only if every input lies inside the finished prefix.
            if (rightBorder <= counter[0])
            {
                float sum = 0;
                for (int i = leftBorder; i < rightBorder; i++)
                {
                    sum += states[i] * weights[weightsStart + i - leftBorder];
                }
                states[index] = sigmoid(sum);
                ready[index] = true;
            }
        }
        __syncthreads();
        // Thread 0 grows the finished prefix by the number of neurons in the
        // current window that became ready.
        // NOTE(review): this counts every ready flag in the window rather
        // than the contiguous ready prefix; if a neuron could stay unready
        // while a later one in the same window completes, the prefix
        // invariant would break -- confirm the topology ordering makes that
        // impossible.
        if (tid == 0)
        {
            int total = counter[0];
            for (int i = total; i < total + blockDim.x && i < dim; i++)
            {
                if (ready[i])
                {
                    counter[0]++;
                }
            }
        }
        __syncthreads();
    }

    // Copy the final states out to global memory.
    int n = tid;
    while(n < dim)
    {
        outStates[n] = states[n];
        n += blockDim.x;
    }
}

// Alternative scheduler: thread t owns neurons t, t+blockDim.x,
// t+2*blockDim.x, ... and computes them in order as their inputs appear.
// Dynamic shared layout: dim floats (states) followed by per-thread
// iteration counters.
// NOTE(review): iters is indexed by tid in [0, blockDim.x), so the shared
// allocation must cover dim*sizeof(float) + blockDim.x*sizeof(int) --
// confirm at the launch site.
extern "C" __global__ void produceState3(const float* arguments, const int argsSize, const float* weights, const int* topology, const int topSize, float* outStates)
{
    const int tid = threadIdx.x;
    const int dim = argsSize + topSize;
    extern __shared__ float s[];
    float* states = s;
    int* iters = (int*)&states[dim];   // iters[t] = neurons thread t has finished

    // Each thread's first owned neuron: inputs count as already done.
    if (tid < argsSize)
    {
        states[tid] = arguments[tid];
        iters[tid] = 1;
    }
    else
    {
        iters[tid] = 0;
    }
    __syncthreads();

    // NOTE(review): the loop condition is per-thread while __syncthreads()
    // sits inside the loop (and once more after it). When dim is not a
    // multiple of blockDim.x, threads exit after different iteration counts,
    // making the barrier divergent -- undefined behaviour / possible hang.
    // Verify the launch parameters guarantee a uniform trip count, or
    // restructure.
    while(iters[tid] * blockDim.x + tid < dim)
    {
        const int index = iters[tid] * blockDim.x + tid;  // next neuron owned by this thread
        const int topIndex = index - argsSize;
        const int leftBorder = topology[topIndex*3];
        const int rightBorder = topology[topIndex*3 + 1];
        const int weightsStart = topology[topIndex*3 + 2];

        // The neuron may start once every input has been produced: input i
        // is the (i / blockDim.x + 1)-th neuron of thread i % blockDim.x.
        bool canStart = true;
        for (int i = leftBorder; i < rightBorder; i++)
        {
            int threadId = i % blockDim.x;
            int mustCounted = i / blockDim.x + 1;
            if (iters[threadId] < mustCounted)
            {
                canStart = false;
                break;
            }
        }
        if (canStart)
        {
            float sum = 0;
            for (int i = leftBorder; i < rightBorder; i++)
            {
                sum += states[i] * weights[weightsStart + i - leftBorder];
            }
            states[index] = sigmoid(sum);
            iters[tid]++;
        }
        __syncthreads();
    }
    __syncthreads();

    // Write all states back to global memory.
    int n = tid;
    while(n < dim)
    {
        outStates[n] = states[n];
        n += blockDim.x;
    }
}
40b1dc47b5992f28b18c4eb99f2a4d81fc9198fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //source: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float* elements; } Matrix; // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); hipMalloc(&d_A.elements, size); hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); hipMalloc(&d_B.elements, size); hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); hipMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); // Read C from device memory hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue 
+= A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; }
40b1dc47b5992f28b18c4eb99f2a4d81fc9198fc.cu
//source: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.width + col) typedef struct { int width; int height; float* elements; } Matrix; // Thread block size #define BLOCK_SIZE 16 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); cudaMalloc(&d_A.elements, size); cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); cudaMalloc(&d_B.elements, size); cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); // Allocate C in device memory Matrix d_C; d_C.width = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); cudaMalloc(&d_C.elements, size); // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); // Read C from device memory cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; for (int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; }
87b2c35ccd2018bd8a778648bfd18bd03e85b174.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "Split.hpp" #include <hip/hip_fp16.h> #include <cassert> nvinfer1::Dims SplitPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { assert(nbInputs == 1); assert(index < this->getNbOutputs()); nvinfer1::Dims const& input_dims = inputDims[0]; nvinfer1::Dims output_dims = input_dims; output_dims.d[_axis] = _output_lengths.at(index); return output_dims; } int SplitPlugin::initialize() { std::vector<int> segment_offsets(1, 0); for( int i=0; i<this->getNbOutputs(); ++i ) { segment_offsets.push_back(segment_offsets.back() + _output_lengths[i]); } _d_segment_offsets = segment_offsets; nvinfer1::Dims dims = this->getInputDims(0); _nx = 1; for( int i=dims.nbDims-1; i>_axis; --i ) { _nx *= dims.d[i]; } _ny = dims.d[_axis]; _nz = 1; for( int i=_axis-1; i>=0; --i ) { _nz *= dims.d[i]; } _d_output_ptrs.resize(this->getNbOutputs(), nullptr); return 0; } template<typename T> __device__ int upper_bound(T const* vals, int n, T const& key) { int i = 0; while( n > 0 ) { int m = n / 2; int j = i + m; if( !(key < vals[j]) ) { i = j + 1; n -= m + 1; } else { n = m; } } return i; } template<typename T> __global__ void split_kernel(int nsegment, int const* __restrict__ segment_offsets, T const* __restrict__ idata, T* const* odatas, int nx, int src_ny, int nz) { int x0 = threadIdx.x + blockIdx.x * blockDim.x; int src_y0 = threadIdx.y + blockIdx.y * blockDim.y; int z0 = threadIdx.z + blockIdx.z * blockDim.z; for( int z=z0; z<nz; z+=blockDim.z*gridDim.z ) { for( int src_y=src_y0; src_y<src_ny; src_y+=blockDim.y*gridDim.y ) { for( int x=x0; x<nx; x+=blockDim.x*gridDim.x ) { int segment = upper_bound(segment_offsets, nsegment, src_y) - 1; int dst_y = src_y - segment_offsets[segment]; int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment]; odatas[segment][x + nx*(dst_y + dst_ny*z)] = idata[x + nx*(src_y + src_ny*z)]; } } } } int SplitPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t 
stream) { auto const& input_dims = this->getInputDims(0); int const* d_segment_offsets_ptr = thrust::raw_pointer_cast(&_d_segment_offsets[0]); float const* idata = reinterpret_cast<float const*>(inputs[0]); float* const* h_odatas = reinterpret_cast<float* const*>(outputs); float** odatas = thrust::raw_pointer_cast(&_d_output_ptrs[0]); hipError_t cuda_status = hipMemcpyAsync(odatas, h_odatas, _d_output_ptrs.size() * sizeof(float*), hipMemcpyHostToDevice, stream); if( cuda_status != hipSuccess ) { return 1; } int nz = _nz * batchSize; dim3 block(32, 16); dim3 grid(::min((_nx - 1) / block.x + 1, 65535u), ::min((_ny - 1) / block.y + 1, 65535u), ::min((_nz - 1) / block.z + 1, 65535u)); if (getDataType()==nvinfer1::DataType::kFLOAT) { hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, _d_segment_offsets.size(), d_segment_offsets_ptr, idata, odatas, _nx, _ny, nz); } else { hipLaunchKernelGGL(( split_kernel), dim3(grid), dim3(block), 0, stream, _d_segment_offsets.size(), d_segment_offsets_ptr, (__half const*)idata, (__half**)odatas, _nx, _ny, nz); } return hipGetLastError() != hipSuccess; }
87b2c35ccd2018bd8a778648bfd18bd03e85b174.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "Split.hpp" #include <cuda_fp16.h> #include <cassert> nvinfer1::Dims SplitPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { assert(nbInputs == 1); assert(index < this->getNbOutputs()); nvinfer1::Dims const& input_dims = inputDims[0]; nvinfer1::Dims output_dims = input_dims; output_dims.d[_axis] = _output_lengths.at(index); return output_dims; } int SplitPlugin::initialize() { std::vector<int> segment_offsets(1, 0); for( int i=0; i<this->getNbOutputs(); ++i ) { segment_offsets.push_back(segment_offsets.back() + _output_lengths[i]); } _d_segment_offsets = segment_offsets; nvinfer1::Dims dims = this->getInputDims(0); _nx = 1; for( int i=dims.nbDims-1; i>_axis; --i ) { _nx *= dims.d[i]; } _ny = dims.d[_axis]; _nz = 1; for( int i=_axis-1; i>=0; --i ) { _nz *= dims.d[i]; } _d_output_ptrs.resize(this->getNbOutputs(), nullptr); return 0; } template<typename T> __device__ int upper_bound(T const* vals, int n, T const& key) { int i = 0; while( n > 0 ) { int m = n / 2; int j = i + m; if( !(key < vals[j]) ) { i = j + 1; n -= m + 1; } else { n = m; } } return i; } template<typename T> __global__ void split_kernel(int nsegment, int const* __restrict__ segment_offsets, T const* __restrict__ idata, T* const* odatas, int nx, int src_ny, int nz) { int x0 = threadIdx.x + blockIdx.x * blockDim.x; int src_y0 = threadIdx.y + blockIdx.y * blockDim.y; int z0 = threadIdx.z + blockIdx.z * blockDim.z; for( int z=z0; z<nz; z+=blockDim.z*gridDim.z ) { for( int src_y=src_y0; src_y<src_ny; src_y+=blockDim.y*gridDim.y ) { for( int x=x0; x<nx; x+=blockDim.x*gridDim.x ) { int segment = upper_bound(segment_offsets, nsegment, src_y) - 1; int dst_y = src_y - segment_offsets[segment]; int dst_ny = segment_offsets[segment + 1] - segment_offsets[segment]; odatas[segment][x + nx*(dst_y + dst_ny*z)] = idata[x + nx*(src_y + src_ny*z)]; } } } } int SplitPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t 
stream) { auto const& input_dims = this->getInputDims(0); int const* d_segment_offsets_ptr = thrust::raw_pointer_cast(&_d_segment_offsets[0]); float const* idata = reinterpret_cast<float const*>(inputs[0]); float* const* h_odatas = reinterpret_cast<float* const*>(outputs); float** odatas = thrust::raw_pointer_cast(&_d_output_ptrs[0]); cudaError_t cuda_status = cudaMemcpyAsync(odatas, h_odatas, _d_output_ptrs.size() * sizeof(float*), cudaMemcpyHostToDevice, stream); if( cuda_status != cudaSuccess ) { return 1; } int nz = _nz * batchSize; dim3 block(32, 16); dim3 grid(std::min((_nx - 1) / block.x + 1, 65535u), std::min((_ny - 1) / block.y + 1, 65535u), std::min((_nz - 1) / block.z + 1, 65535u)); if (getDataType()==nvinfer1::DataType::kFLOAT) { split_kernel<<<grid, block, 0, stream>>> (_d_segment_offsets.size(), d_segment_offsets_ptr, idata, odatas, _nx, _ny, nz); } else { split_kernel<<<grid, block, 0, stream>>> (_d_segment_offsets.size(), d_segment_offsets_ptr, (__half const*)idata, (__half**)odatas, _nx, _ny, nz); } return cudaGetLastError() != cudaSuccess; }
5147c14fd5a4a3671ddf1c4278099b8b4b5907a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/max_pool_with_index_gpu.h" #include "caffe2/utils/conversions.h" namespace caffe2 { namespace { /*** * Note: CUDA kernels are minor changes from those at: * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cu * Originally licensed under BSD **/ template <typename Dtype> __global__ void MaxPoolForward( const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask) { CUDA_1D_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); float maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (convert::To<Dtype, float>(bottom_slice[h * width + w]) > maxval) { maxidx = h * width + w; maxval = convert::To<Dtype, float>(bottom_slice[maxidx]); } } } top_data[index] = convert::To<float, Dtype>(maxval); mask[index] = maxidx; } } template <typename Dtype> __global__ void MaxPoolBackward( const int nthreads, const Dtype* const top_diff, const int* const mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const 
int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); float gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += convert::To<Dtype, float>(top_diff_slice[ph * pooled_width + pw]); } } } bottom_diff[index] = convert::To<float, Dtype>(gradient); } } }; template <typename T> bool MaxPoolWithIndexOp::DoRunWithType() { auto& X = Input(0); auto* mask = Output(1); auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, X.dim32(1)); auto* Y = Output(0, sizes, at::dtype<T>()); int output_size = Y->size(); mask->Resize(output_size); hipLaunchKernelGGL(( MaxPoolForward<T>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<T>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<T>(), mask->template mutable_data<int>()); return true; } bool MaxPoolWithIndexOp::RunOnDevice() { auto& X = Input(0); CAFFE_ENFORCE(X.ndim() == 4, "Operator only supports 4D tensors"); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if 
(X.IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported input type"); } } template <typename T> bool MaxPoolWithIndexGradientOp::DoRunWithType() { auto& X = Input(0); auto& dY = Input(1); auto& mask = Input(2); auto* dX = Output(0); CAFFE_ENFORCE(X.ndim() == 4, "Operator only supports 4D tensors"); dX->ResizeLike(X); ConvPoolOpBase<CUDAContext>::ComputePads(vector<int>{X.dim32(2), X.dim32(3)}); hipLaunchKernelGGL(( MaxPoolBackward<T>), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<T>(), mask.data<int>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<T>()); return true; } bool MaxPoolWithIndexGradientOp::RunOnDevice() { auto& X = Input(0); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported input type"); } } namespace { REGISTER_CUDA_OPERATOR(MaxPoolWithIndex, MaxPoolWithIndexOp); REGISTER_CUDA_OPERATOR(MaxPoolWithIndexGradient, MaxPoolWithIndexGradientOp); class GetMaxPoolWithIndexGradient : public GradientMakerBase { using GradientMakerBase::GradientMakerBase; vector<OperatorDef> GetGradientDefs() override { return SingleGradientDef( "MaxPoolWithIndexGradient", "", vector<string>{I(0), GO(0), O(1)}, vector<string>{GI(0)}); } }; REGISTER_GRADIENT(MaxPoolWithIndex, GetMaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndex) .NumInputs(1) .NumOutputs(2) .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForPool) .SetDoc(R"DOC( MaxPoolWithIndex consumes an input blob X and applies max pooling across the blob according to kernel sizes, stride sizes and pad lengths defined by the ConvPoolOpBase operator. 
It also produces an explicit mask that defines the location that all maximum values were found, which is re-used in the gradient pass. This op is deterministic. )DOC") .Input( 0, "X", "Input data tensor from the previous operator; dimensions " "depend on whether the NCHW or NHWC operators are being used. For " "example, in the former, the input has size (N x C x H x W), where N is" " the batch size, C is the number of channels, and H and W are the " "height and the width of the data. The corresponding permutation of " "dimensions is used in the latter case. ") .Output( 0, "Y", "Output data tensor from average pooling across the input " "tensor. Dimensions will vary based on various kernel, stride, and pad " "sizes.") .Output( 1, "Index", "Mask of location indices of the found maximum values, " " used in the gradient operator to accumulate dY values to the " "appropriate locations in Y"); }; }; // namespace caffe2
5147c14fd5a4a3671ddf1c4278099b8b4b5907a5.cu
#include "caffe2/operators/max_pool_with_index_gpu.h" #include "caffe2/utils/conversions.h" namespace caffe2 { namespace { /*** * Note: CUDA kernels are minor changes from those at: * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cu * Originally licensed under BSD **/ template <typename Dtype> __global__ void MaxPoolForward( const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask) { CUDA_1D_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); float maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (convert::To<Dtype, float>(bottom_slice[h * width + w]) > maxval) { maxidx = h * width + w; maxval = convert::To<Dtype, float>(bottom_slice[maxidx]); } } } top_data[index] = convert::To<float, Dtype>(maxval); mask[index] = maxidx; } } template <typename Dtype> __global__ void MaxPoolBackward( const int nthreads, const Dtype* const top_diff, const int* const mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { 
CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); float gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += convert::To<Dtype, float>(top_diff_slice[ph * pooled_width + pw]); } } } bottom_diff[index] = convert::To<float, Dtype>(gradient); } } }; template <typename T> bool MaxPoolWithIndexOp::DoRunWithType() { auto& X = Input(0); auto* mask = Output(1); auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, X.dim32(1)); auto* Y = Output(0, sizes, at::dtype<T>()); int output_size = Y->size(); mask->Resize(output_size); MaxPoolForward<T> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<T>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<T>(), mask->template mutable_data<int>()); return true; } bool MaxPoolWithIndexOp::RunOnDevice() { auto& X = Input(0); CAFFE_ENFORCE(X.ndim() == 4, "Operator only supports 4D tensors"); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported input type"); } } 
template <typename T> bool MaxPoolWithIndexGradientOp::DoRunWithType() { auto& X = Input(0); auto& dY = Input(1); auto& mask = Input(2); auto* dX = Output(0); CAFFE_ENFORCE(X.ndim() == 4, "Operator only supports 4D tensors"); dX->ResizeLike(X); ConvPoolOpBase<CUDAContext>::ComputePads(vector<int>{X.dim32(2), X.dim32(3)}); MaxPoolBackward<T><<< CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<T>(), mask.data<int>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<T>()); return true; } bool MaxPoolWithIndexGradientOp::RunOnDevice() { auto& X = Input(0); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported input type"); } } namespace { REGISTER_CUDA_OPERATOR(MaxPoolWithIndex, MaxPoolWithIndexOp); REGISTER_CUDA_OPERATOR(MaxPoolWithIndexGradient, MaxPoolWithIndexGradientOp); class GetMaxPoolWithIndexGradient : public GradientMakerBase { using GradientMakerBase::GradientMakerBase; vector<OperatorDef> GetGradientDefs() override { return SingleGradientDef( "MaxPoolWithIndexGradient", "", vector<string>{I(0), GO(0), O(1)}, vector<string>{GI(0)}); } }; REGISTER_GRADIENT(MaxPoolWithIndex, GetMaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndex) .NumInputs(1) .NumOutputs(2) .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForPool) .SetDoc(R"DOC( MaxPoolWithIndex consumes an input blob X and applies max pooling across the blob according to kernel sizes, stride sizes and pad lengths defined by the ConvPoolOpBase operator. It also produces an explicit mask that defines the location that all maximum values were found, which is re-used in the gradient pass. This op is deterministic. 
)DOC") .Input( 0, "X", "Input data tensor from the previous operator; dimensions " "depend on whether the NCHW or NHWC operators are being used. For " "example, in the former, the input has size (N x C x H x W), where N is" " the batch size, C is the number of channels, and H and W are the " "height and the width of the data. The corresponding permutation of " "dimensions is used in the latter case. ") .Output( 0, "Y", "Output data tensor from average pooling across the input " "tensor. Dimensions will vary based on various kernel, stride, and pad " "sizes.") .Output( 1, "Index", "Mask of location indices of the found maximum values, " " used in the gradient operator to accumulate dY values to the " "appropriate locations in Y"); }; }; // namespace caffe2
546b3d56ed95aab64cc2de5e1940f4e754f282f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2013 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// // ---------------------------------------------------------------------------------------- // Transpose // // This file contains both device and host code for transposing a floating-point // matrix. It performs several transpose kernels, which incrementally improve performance // through coalescing, removing shared memory bank conflicts, and eliminating partition // camping. Several of the kernels perform a copy, used to represent the best case // performance that a transpose can achieve. // // Please see the whitepaper in the docs folder of the transpose project for a detailed // description of this performance study. // ---------------------------------------------------------------------------------------- // Utilities and system includes //#include <helper_string.h> // helper for string parsing //#include <helper_image.h> // helper for image and data compariosn //#include <helper_cuda.h> // helper for cuda error checking functions #include <stdio.h> // helper for cuda error checking functions const char *sSDKsample = "Transpose"; // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes // TILE_DIM/BLOCK_ROWS elements. 
TILE_DIM must be an integral multiple of BLOCK_ROWS #define TILE_DIM 16 #define BLOCK_ROWS 16 // This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y int MATRIX_SIZE_X = 1024; int MATRIX_SIZE_Y = 1024; int MUL_FACTOR = TILE_DIM; #define FLOOR(a,b) (a-(a%b)) // Compute the tile size necessary to illustrate performance cases for SM12+ hardware int MAX_TILES_SM12 = (FLOOR(MATRIX_SIZE_X,512) * FLOOR(MATRIX_SIZE_Y,512)) / (TILE_DIM *TILE_DIM); // Compute the tile size necessary to illustrate performance cases for SM10,SM11 hardware int MAX_TILES_SM10 = (FLOOR(MATRIX_SIZE_X,384) * FLOOR(MATRIX_SIZE_Y,384)) / (TILE_DIM *TILE_DIM); // Number of repetitions used for timing. Two sets of repetitions are performed: // 1) over kernel launches and 2) inside the kernel over just the loads and stores #define NUM_REPS 100 // ------------------------------------------------------- // Copies // width and height must be integral multiples of TILE_DIM // ------------------------------------------------------- __global__ void copy(float *odata, float *idata, int width, int height) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + width*yIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index+i*width] = idata[index+i*width]; } } __global__ void copySharedMem(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + width*yIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex < width && yIndex < height) { tile[threadIdx.y][threadIdx.x] = idata[index]; } } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex < height && yIndex < width) { odata[index] = tile[threadIdx.y][threadIdx.x]; } } } // ------------------------------------------------------- // Transposes // width and height must be integral multiples of TILE_DIM // 
------------------------------------------------------- __global__ void transposeNaive(float *odata, float *idata, int width, int height) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + width * yIndex; int index_out = yIndex + height * xIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i] = idata[index_in+i*width]; } } // coalesced transpose (with bank conflicts) __global__ void transposeCoalesced(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // Coalesced transpose with no bank conflicts __global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // Transpose that effectively reorders execution of thread blocks along diagonals of the // matrix (also coalesced and has no bank conflicts) // // Here blockIdx.x is interpreted as the distance along a diagonal 
and blockIdx.y as // corresponding to different diagonals // // blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly // used cartesian coordinates so that the only changes to the code from the coalesced version // are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and // bloclIdx.y with the subscripted versions in the remaining code __global__ void transposeDiagonal(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int blockIdx_x, blockIdx_y; // do diagonal reordering if (width == height) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { int bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } // from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x // and similarly for y int xIndex = blockIdx_x * TILE_DIM + threadIdx.x; int yIndex = blockIdx_y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx_y * TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // -------------------------------------------------------------------- // Partial transposes // NB: the coarse- and fine-grained routines only perform part of a // transpose and will fail the test against the reference solution // // They are used to assess performance characteristics of different // components of a full transpose // -------------------------------------------------------------------- __global__ void transposeFineGrained(float *odata, float *idata, int width, int height) { __shared__ float block[TILE_DIM][TILE_DIM+1]; int xIndex = 
blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + (yIndex)*width; for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) { block[threadIdx.y+i][threadIdx.x] = idata[index+i*width]; } __syncthreads(); for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) { odata[index+i*height] = block[threadIdx.x][threadIdx.y+i]; } } __global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height) { __shared__ float block[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) { block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) { odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x]; } } // --------------------- // host utility routines // --------------------- void computeTransposeGold(float *gold, float *idata, const int size_x, const int size_y) { for (int y = 0; y < size_y; ++y) { for (int x = 0; x < size_x; ++x) { gold[(x * size_y) + y] = idata[(y * size_x) + x]; } } } /* void getParams(int argc, char **argv, hipDeviceProp_t &deviceProp, int &size_x, int &size_y, int max_tile_dim) { // set matrix size (if (x,y) dim of matrix is not square, then this will have to be modified if (checkCmdLineFlag(argc, (const char **)argv, "dimX")) { size_x = getCmdLineArgumentInt(argc, (const char **) argv, "dimX"); if (size_x > max_tile_dim) { printf("> MatrixSize X = %d is greater than the recommended size = %d\n", size_x, max_tile_dim); } else { printf("> MatrixSize X = %d\n", size_x); } } else { size_x = max_tile_dim; // If this is SM12 hardware, we want to round down to a multiple of 512 if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { 
size_x = FLOOR(size_x, 512); } else // else for SM10,SM11 we round down to a multiple of 384 { size_x = FLOOR(size_x, 384); } } if (checkCmdLineFlag(argc, (const char **)argv, "dimY")) { size_y = getCmdLineArgumentInt(argc, (const char **) argv, "dimY"); if (size_y > max_tile_dim) { printf("> MatrixSize Y = %d is greater than the recommended size = %d\n", size_y, max_tile_dim); } else { printf("> MatrixSize Y = %d\n", size_y); } } else { size_y = max_tile_dim; // If this is SM12 hardware, we want to round down to a multiple of 512 if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { size_y = FLOOR(size_y, 512); } else // else for SM10,SM11 we round down to a multiple of 384 { size_y = FLOOR(size_y, 384); } } } */ void showHelp() { printf("\n%s : Command line options\n", sSDKsample); printf("\t-device=n (where n=0,1,2.... for the GPU device)\n\n"); printf("> The default matrix size can be overridden with these parameters\n"); printf("\t-dimX=row_dim_size (matrix row dimensions)\n"); printf("\t-dimY=col_dim_size (matrix column dimensions)\n"); } // ---- // main // ---- int main(int argc, char **argv) { // Start logs //printf("%s Starting...\n\n", sSDKsample); /*if (checkCmdLineFlag(argc, (const char **)argv, "help")) { showHelp(); return 0; }*/ //int devID = findCudaDevice(argc, (const char **)argv); int devID = 0; //hipDeviceProp_t deviceProp; // get number of SMs on this GPU //hipGetDevice(&devID); //hipGetDeviceProperties(&deviceProp, devID); // compute the scaling factor (for GPUs with fewer MPs) //float scale_factor, total_tiles; //scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); //printf("> Device %d: \"%s\"\n", devID, deviceProp.name); //printf("> SM Capability %d.%d detected:\n", deviceProp.major, deviceProp.minor); // Calculate number of tiles we will run for the Matrix Transpose performance tests int size_x, size_y, max_matrix_dim, 
matrix_size_test; //if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) //{ matrix_size_test = 512; // we round down max_matrix_dim for this perf test //total_tiles = (float)MAX_TILES_SM12 / scale_factor; //total_tiles = 1000.0f; //} //else //{ // matrix_size_test = 384; // we round down max_matrix_dim for this perf test // total_tiles = (float)MAX_TILES_SM10 / scale_factor; //} //max_matrix_dim = FLOOR((int)(floor(sqrt(total_tiles))* TILE_DIM), matrix_size_test); // This is the minimum size allowed //if (max_matrix_dim == 0) //{ max_matrix_dim = matrix_size_test; //} //printf("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", // deviceProp.name, deviceProp.multiProcessorCount, // _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), // _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); //printf("> Compute performance scaling factor = %4.2f\n", scale_factor); // Extract parameters if there are any, command line -dimx and -dimy can override // any of these settings //getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim); size_x = max_matrix_dim; size_y = max_matrix_dim; /*if (size_x != size_y) { printf("\n[%s] does not support non-square matrices (row_dim_size(%d) != col_dim_size(%d))\nExiting...\n\n", sSDKsample, size_x, size_y); hipDeviceReset(); exit(EXIT_FAILURE); } if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0) { printf("[%s] Matrix size must be integral multiple of tile size\nExiting...\n\n", sSDKsample); hipDeviceReset(); exit(EXIT_FAILURE); }*/ // kernel pointer and descriptor void (*kernel)(float *, float *, int, int); char *kernelName; // execution configuration parameters dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); // CUDA events hipEvent_t start, stop; // size of memory required to store the matrix const int mem_size = sizeof(float) * size_x*size_y; /*if (2*mem_size > deviceProp.totalGlobalMem) { printf("Input matrix size is larger than 
the available device memory!\n"); printf("Please choose a smaller size matrix\n"); hipDeviceReset(); exit(EXIT_FAILURE); }*/ // allocate host memory float *h_idata = (float *) malloc(mem_size); float *h_odata = (float *) malloc(mem_size); //float *transposeGold = (float *) malloc(mem_size); //float *gold; // allocate device memory float *d_idata, *d_odata; //checkCudaErrors(hipMalloc((void **) &d_idata, mem_size)); //checkCudaErrors(hipMalloc((void **) &d_odata, mem_size)); hipMalloc((void **) &d_idata, mem_size); hipMalloc((void **) &d_odata, mem_size); #ifdef _SYM klee_make_symbolic(h_idata, sizeof(float)*size_x*size_y, "h_idata_input"); #else // initalize host data for (int i = 0; i < (size_x*size_y); ++i) { h_idata[i] = (float) i; } #endif // copy host data to device //checkCudaErrors(hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice)); hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice); // Compute reference transpose solution //computeTransposeGold(transposeGold, h_idata, size_x, size_y); // print out common data for all kernels printf("\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: %dx%d\n\n", size_x, size_y, size_x/TILE_DIM, size_y/TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS); // initialize events //checkCudaErrors(hipEventCreate(&start)); //checkCudaErrors(hipEventCreate(&stop)); // // loop over different kernels // bool success = true; kernel = &copy; kernelName = "simple copy "; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); kernel = &copySharedMem; kernelName = "shared memory copy"; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); kernel = &transposeNaive; kernelName = "naive "; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); kernel = &transposeCoalesced; kernelName = "coalesced "; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, 
size_x, size_y); kernel = &transposeNoBankConflicts; kernelName = "optimized "; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); kernel = &transposeFineGrained; kernelName = "fine-grained "; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); kernel = &transposeDiagonal; kernelName = "diagonal "; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); /*for (int k = 0; k<8; k++) { // set kernel pointer switch (k) { case 0: kernel = &copy; kernelName = "simple copy "; break; case 1: kernel = &copySharedMem; kernelName = "shared memory copy"; break; case 2: kernel = &transposeNaive; kernelName = "naive "; break; case 3: kernel = &transposeCoalesced; kernelName = "coalesced "; break; case 4: kernel = &transposeNoBankConflicts; kernelName = "optimized "; break; case 5: kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; break; case 6: kernel = &transposeFineGrained; kernelName = "fine-grained "; break; case 7: kernel = &transposeDiagonal; kernelName = "diagonal "; break; }*/ // set reference solution /*if (kernel == &copy || kernel == &copySharedMem) { gold = h_idata; } else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained) { gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check } else { gold = transposeGold; }*/ // Clear error status //checkCudaErrors(hipGetLastError()); // warmup to avoid timing startup // take measurements for loop over kernel launches //checkCudaErrors(hipEventRecord(start, 0)); //for (int i=0; i < NUM_REPS; i++) //{ //hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, 0, d_odata, d_idata, size_x, size_y); // Ensure no launch failure 
//checkCudaErrors(hipGetLastError()); //} //checkCudaErrors(hipEventRecord(stop, 0)); //checkCudaErrors(hipEventSynchronize(stop)); float kernelTime; //checkCudaErrors(hipEventElapsedTime(&kernelTime, start, stop)); //checkCudaErrors(hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost)); hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost); //bool res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f); /*if (res == false) { printf("*** %s kernel FAILED ***\n", kernelName); success = false; }*/ // take measurements for loop inside kernel //checkCudaErrors(hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost)); hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost); //res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f); /*if (res == false) { printf("*** %s kernel FAILED ***\n", kernelName); success = false; }*/ // report effective bandwidths //float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/NUM_REPS); //printf("transpose %s, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", // kernelName, // kernelBandwidth, // kernelTime/NUM_REPS, // (size_x *size_y), 1, TILE_DIM *BLOCK_ROWS); //} // cleanup free(h_idata); free(h_odata); //free(transposeGold); hipFree(d_idata); hipFree(d_odata); //checkCudaErrors(hipEventDestroy(start)); //checkCudaErrors(hipEventDestroy(stop)); hipDeviceReset(); if (!success) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
546b3d56ed95aab64cc2de5e1940f4e754f282f2.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2013 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// // ---------------------------------------------------------------------------------------- // Transpose // // This file contains both device and host code for transposing a floating-point // matrix. It performs several transpose kernels, which incrementally improve performance // through coalescing, removing shared memory bank conflicts, and eliminating partition // camping. Several of the kernels perform a copy, used to represent the best case // performance that a transpose can achieve. // // Please see the whitepaper in the docs folder of the transpose project for a detailed // description of this performance study. // ---------------------------------------------------------------------------------------- // Utilities and system includes //#include <helper_string.h> // helper for string parsing //#include <helper_image.h> // helper for image and data compariosn //#include <helper_cuda.h> // helper for cuda error checking functions #include <stdio.h> // helper for cuda error checking functions const char *sSDKsample = "Transpose"; // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes // TILE_DIM/BLOCK_ROWS elements. 
TILE_DIM must be an integral multiple of BLOCK_ROWS #define TILE_DIM 16 #define BLOCK_ROWS 16 // This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y int MATRIX_SIZE_X = 1024; int MATRIX_SIZE_Y = 1024; int MUL_FACTOR = TILE_DIM; #define FLOOR(a,b) (a-(a%b)) // Compute the tile size necessary to illustrate performance cases for SM12+ hardware int MAX_TILES_SM12 = (FLOOR(MATRIX_SIZE_X,512) * FLOOR(MATRIX_SIZE_Y,512)) / (TILE_DIM *TILE_DIM); // Compute the tile size necessary to illustrate performance cases for SM10,SM11 hardware int MAX_TILES_SM10 = (FLOOR(MATRIX_SIZE_X,384) * FLOOR(MATRIX_SIZE_Y,384)) / (TILE_DIM *TILE_DIM); // Number of repetitions used for timing. Two sets of repetitions are performed: // 1) over kernel launches and 2) inside the kernel over just the loads and stores #define NUM_REPS 100 // ------------------------------------------------------- // Copies // width and height must be integral multiples of TILE_DIM // ------------------------------------------------------- __global__ void copy(float *odata, float *idata, int width, int height) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + width*yIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index+i*width] = idata[index+i*width]; } } __global__ void copySharedMem(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + width*yIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex < width && yIndex < height) { tile[threadIdx.y][threadIdx.x] = idata[index]; } } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { if (xIndex < height && yIndex < width) { odata[index] = tile[threadIdx.y][threadIdx.x]; } } } // ------------------------------------------------------- // Transposes // width and height must be integral multiples of TILE_DIM // 
------------------------------------------------------- __global__ void transposeNaive(float *odata, float *idata, int width, int height) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + width * yIndex; int index_out = yIndex + height * xIndex; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i] = idata[index_in+i*width]; } } // coalesced transpose (with bank conflicts) __global__ void transposeCoalesced(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // Coalesced transpose with no bank conflicts __global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // Transpose that effectively reorders execution of thread blocks along diagonals of the // matrix (also coalesced and has no bank conflicts) // // Here blockIdx.x is interpreted as the distance along a diagonal 
and blockIdx.y as // corresponding to different diagonals // // blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the more commonly // used cartesian coordinates so that the only changes to the code from the coalesced version // are the calculation of the blockIdx_x and blockIdx_y and replacement of blockIdx.x and // bloclIdx.y with the subscripted versions in the remaining code __global__ void transposeDiagonal(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int blockIdx_x, blockIdx_y; // do diagonal reordering if (width == height) { blockIdx_y = blockIdx.x; blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x; } else { int bid = blockIdx.x + gridDim.x*blockIdx.y; blockIdx_y = bid%gridDim.y; blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x; } // from here on the code is same as previous kernel except blockIdx_x replaces blockIdx.x // and similarly for y int xIndex = blockIdx_x * TILE_DIM + threadIdx.x; int yIndex = blockIdx_y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx_y * TILE_DIM + threadIdx.x; yIndex = blockIdx_x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } // -------------------------------------------------------------------- // Partial transposes // NB: the coarse- and fine-grained routines only perform part of a // transpose and will fail the test against the reference solution // // They are used to assess performance characteristics of different // components of a full transpose // -------------------------------------------------------------------- __global__ void transposeFineGrained(float *odata, float *idata, int width, int height) { __shared__ float block[TILE_DIM][TILE_DIM+1]; int xIndex = 
blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index = xIndex + (yIndex)*width; for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) { block[threadIdx.y+i][threadIdx.x] = idata[index+i*width]; } __syncthreads(); for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) { odata[index+i*height] = block[threadIdx.x][threadIdx.y+i]; } } __global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height) { __shared__ float block[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) { block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) { odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x]; } } // --------------------- // host utility routines // --------------------- void computeTransposeGold(float *gold, float *idata, const int size_x, const int size_y) { for (int y = 0; y < size_y; ++y) { for (int x = 0; x < size_x; ++x) { gold[(x * size_y) + y] = idata[(y * size_x) + x]; } } } /* void getParams(int argc, char **argv, cudaDeviceProp &deviceProp, int &size_x, int &size_y, int max_tile_dim) { // set matrix size (if (x,y) dim of matrix is not square, then this will have to be modified if (checkCmdLineFlag(argc, (const char **)argv, "dimX")) { size_x = getCmdLineArgumentInt(argc, (const char **) argv, "dimX"); if (size_x > max_tile_dim) { printf("> MatrixSize X = %d is greater than the recommended size = %d\n", size_x, max_tile_dim); } else { printf("> MatrixSize X = %d\n", size_x); } } else { size_x = max_tile_dim; // If this is SM12 hardware, we want to round down to a multiple of 512 if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { 
size_x = FLOOR(size_x, 512); } else // else for SM10,SM11 we round down to a multiple of 384 { size_x = FLOOR(size_x, 384); } } if (checkCmdLineFlag(argc, (const char **)argv, "dimY")) { size_y = getCmdLineArgumentInt(argc, (const char **) argv, "dimY"); if (size_y > max_tile_dim) { printf("> MatrixSize Y = %d is greater than the recommended size = %d\n", size_y, max_tile_dim); } else { printf("> MatrixSize Y = %d\n", size_y); } } else { size_y = max_tile_dim; // If this is SM12 hardware, we want to round down to a multiple of 512 if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) { size_y = FLOOR(size_y, 512); } else // else for SM10,SM11 we round down to a multiple of 384 { size_y = FLOOR(size_y, 384); } } } */ void showHelp() { printf("\n%s : Command line options\n", sSDKsample); printf("\t-device=n (where n=0,1,2.... for the GPU device)\n\n"); printf("> The default matrix size can be overridden with these parameters\n"); printf("\t-dimX=row_dim_size (matrix row dimensions)\n"); printf("\t-dimY=col_dim_size (matrix column dimensions)\n"); } // ---- // main // ---- int main(int argc, char **argv) { // Start logs //printf("%s Starting...\n\n", sSDKsample); /*if (checkCmdLineFlag(argc, (const char **)argv, "help")) { showHelp(); return 0; }*/ //int devID = findCudaDevice(argc, (const char **)argv); int devID = 0; //cudaDeviceProp deviceProp; // get number of SMs on this GPU //cudaGetDevice(&devID); //cudaGetDeviceProperties(&deviceProp, devID); // compute the scaling factor (for GPUs with fewer MPs) //float scale_factor, total_tiles; //scale_factor = max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * (float)deviceProp.multiProcessorCount)), 1.0f); //printf("> Device %d: \"%s\"\n", devID, deviceProp.name); //printf("> SM Capability %d.%d detected:\n", deviceProp.major, deviceProp.minor); // Calculate number of tiles we will run for the Matrix Transpose performance tests int size_x, size_y, max_matrix_dim, 
matrix_size_test; //if ((deviceProp.major == 1 && deviceProp.minor >= 2) || deviceProp.major > 1) //{ matrix_size_test = 512; // we round down max_matrix_dim for this perf test //total_tiles = (float)MAX_TILES_SM12 / scale_factor; //total_tiles = 1000.0f; //} //else //{ // matrix_size_test = 384; // we round down max_matrix_dim for this perf test // total_tiles = (float)MAX_TILES_SM10 / scale_factor; //} //max_matrix_dim = FLOOR((int)(floor(sqrt(total_tiles))* TILE_DIM), matrix_size_test); // This is the minimum size allowed //if (max_matrix_dim == 0) //{ max_matrix_dim = matrix_size_test; //} //printf("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", // deviceProp.name, deviceProp.multiProcessorCount, // _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), // _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); //printf("> Compute performance scaling factor = %4.2f\n", scale_factor); // Extract parameters if there are any, command line -dimx and -dimy can override // any of these settings //getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim); size_x = max_matrix_dim; size_y = max_matrix_dim; /*if (size_x != size_y) { printf("\n[%s] does not support non-square matrices (row_dim_size(%d) != col_dim_size(%d))\nExiting...\n\n", sSDKsample, size_x, size_y); cudaDeviceReset(); exit(EXIT_FAILURE); } if (size_x%TILE_DIM != 0 || size_y%TILE_DIM != 0) { printf("[%s] Matrix size must be integral multiple of tile size\nExiting...\n\n", sSDKsample); cudaDeviceReset(); exit(EXIT_FAILURE); }*/ // kernel pointer and descriptor void (*kernel)(float *, float *, int, int); char *kernelName; // execution configuration parameters dim3 grid(size_x/TILE_DIM, size_y/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS); // CUDA events cudaEvent_t start, stop; // size of memory required to store the matrix const int mem_size = sizeof(float) * size_x*size_y; /*if (2*mem_size > deviceProp.totalGlobalMem) { printf("Input matrix size is larger 
than the available device memory!\n"); printf("Please choose a smaller size matrix\n"); cudaDeviceReset(); exit(EXIT_FAILURE); }*/ // allocate host memory float *h_idata = (float *) malloc(mem_size); float *h_odata = (float *) malloc(mem_size); //float *transposeGold = (float *) malloc(mem_size); //float *gold; // allocate device memory float *d_idata, *d_odata; //checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size)); //checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size)); cudaMalloc((void **) &d_idata, mem_size); cudaMalloc((void **) &d_odata, mem_size); #ifdef _SYM klee_make_symbolic(h_idata, sizeof(float)*size_x*size_y, "h_idata_input"); #else // initalize host data for (int i = 0; i < (size_x*size_y); ++i) { h_idata[i] = (float) i; } #endif // copy host data to device //checkCudaErrors(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice)); cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice); // Compute reference transpose solution //computeTransposeGold(transposeGold, h_idata, size_x, size_y); // print out common data for all kernels printf("\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: %dx%d\n\n", size_x, size_y, size_x/TILE_DIM, size_y/TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM, BLOCK_ROWS); // initialize events //checkCudaErrors(cudaEventCreate(&start)); //checkCudaErrors(cudaEventCreate(&stop)); // // loop over different kernels // bool success = true; kernel = &copy; kernelName = "simple copy "; kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); kernel = &copySharedMem; kernelName = "shared memory copy"; kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); kernel = &transposeNaive; kernelName = "naive "; kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); kernel = &transposeCoalesced; kernelName = "coalesced "; kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); kernel = &transposeNoBankConflicts; kernelName = "optimized "; kernel<<<grid, threads>>>(d_odata, d_idata, size_x, 
size_y); kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); kernel = &transposeFineGrained; kernelName = "fine-grained "; kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); kernel = &transposeDiagonal; kernelName = "diagonal "; kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); /*for (int k = 0; k<8; k++) { // set kernel pointer switch (k) { case 0: kernel = &copy; kernelName = "simple copy "; break; case 1: kernel = &copySharedMem; kernelName = "shared memory copy"; break; case 2: kernel = &transposeNaive; kernelName = "naive "; break; case 3: kernel = &transposeCoalesced; kernelName = "coalesced "; break; case 4: kernel = &transposeNoBankConflicts; kernelName = "optimized "; break; case 5: kernel = &transposeCoarseGrained; kernelName = "coarse-grained "; break; case 6: kernel = &transposeFineGrained; kernelName = "fine-grained "; break; case 7: kernel = &transposeDiagonal; kernelName = "diagonal "; break; }*/ // set reference solution /*if (kernel == &copy || kernel == &copySharedMem) { gold = h_idata; } else if (kernel == &transposeCoarseGrained || kernel == &transposeFineGrained) { gold = h_odata; // fine- and coarse-grained kernels are not full transposes, so bypass check } else { gold = transposeGold; }*/ // Clear error status //checkCudaErrors(cudaGetLastError()); // warmup to avoid timing startup // take measurements for loop over kernel launches //checkCudaErrors(cudaEventRecord(start, 0)); //for (int i=0; i < NUM_REPS; i++) //{ // kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y); // Ensure no launch failure //checkCudaErrors(cudaGetLastError()); //} //checkCudaErrors(cudaEventRecord(stop, 0)); //checkCudaErrors(cudaEventSynchronize(stop)); float kernelTime; //checkCudaErrors(cudaEventElapsedTime(&kernelTime, start, stop)); //checkCudaErrors(cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost)); cudaMemcpy(h_odata, d_odata, mem_size, 
cudaMemcpyDeviceToHost); //bool res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f); /*if (res == false) { printf("*** %s kernel FAILED ***\n", kernelName); success = false; }*/ // take measurements for loop inside kernel //checkCudaErrors(cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost)); cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost); //res = compareData(gold, h_odata, size_x*size_y, 0.01f, 0.0f); /*if (res == false) { printf("*** %s kernel FAILED ***\n", kernelName); success = false; }*/ // report effective bandwidths //float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/NUM_REPS); //printf("transpose %s, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", // kernelName, // kernelBandwidth, // kernelTime/NUM_REPS, // (size_x *size_y), 1, TILE_DIM *BLOCK_ROWS); //} // cleanup free(h_idata); free(h_odata); //free(transposeGold); cudaFree(d_idata); cudaFree(d_odata); //checkCudaErrors(cudaEventDestroy(start)); //checkCudaErrors(cudaEventDestroy(stop)); cudaDeviceReset(); if (!success) { printf("Test failed!\n"); exit(EXIT_FAILURE); } printf("Test passed\n"); exit(EXIT_SUCCESS); }
c83b7945c651facc5c1122695c7dccc86220c296.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the implementation of data sharing environments/ // //===----------------------------------------------------------------------===// #include "omptarget-nvptx.h" #include <stdio.h> // Number of threads in the CUDA block. __device__ static unsigned getNumThreads() { return blockDim.x; } // Thread ID in the CUDA block __device__ static unsigned getThreadId() { return threadIdx.x; } // Warp ID in the CUDA block __device__ static unsigned getWarpId() { return threadIdx.x / WARPSIZE; } // The CUDA thread ID of the master thread. __device__ static unsigned getMasterThreadId() { unsigned Mask = WARPSIZE - 1; return (getNumThreads() - 1) & (~Mask); } // Find the active threads in the warp - return a mask whose n-th bit is set if // the n-th thread in the warp is active. __device__ static unsigned getActiveThreadsMask() { return __BALLOT_SYNC(0xFFFFFFFF, true); } // Return true if this is the first active thread in the warp. __device__ static bool IsWarpMasterActiveThread() { unsigned long long Mask = getActiveThreadsMask(); unsigned long long ShNum = WARPSIZE - (getThreadId() % WARPSIZE); unsigned long long Sh = Mask << ShNum; return Sh == 0; } // Return true if this is the master thread. __device__ static bool IsMasterThread() { return getMasterThreadId() == getThreadId(); } /// Return the provided size aligned to the size of a pointer. 
__device__ static size_t AlignVal(size_t Val) { const size_t Align = (size_t)sizeof(void *); if (Val & (Align - 1)) { Val += Align; Val &= ~(Align - 1); } return Val; } #define DSFLAG 0 #define DSFLAG_INIT 0 #define DSPRINT(_flag, _str, _args...) \ { \ if (_flag) { \ /*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \ } \ } #define DSPRINT0(_flag, _str) \ { \ if (_flag) { \ /*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \ } \ } // Initialize the shared data structures. This is expected to be called for the // master thread and warp masters. \param RootS: A pointer to the root of the // data sharing stack. \param InitialDataSize: The initial size of the data in // the slot. EXTERN void __kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS, size_t InitialDataSize) { DSPRINT0(DSFLAG_INIT, "Entering __kmpc_initialize_data_sharing_environment\n"); unsigned WID = getWarpId(); DSPRINT(DSFLAG_INIT, "Warp ID: %d\n", WID); omptarget_nvptx_TeamDescr *teamDescr = &omptarget_nvptx_threadPrivateContext->TeamContext(); __kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID); DataSharingState.SlotPtr[WID] = RootS; DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0]; // We don't need to initialize the frame and active threads. 
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", InitialDataSize); DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (long long)RootS); DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n", (long long)RootS->DataEnd); DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n", (long long)RootS->Next); DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n", (long long)DataSharingState.SlotPtr[WID]); DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n", (long long)DataSharingState.StackPtr[WID]); DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n"); } EXTERN void *__kmpc_data_sharing_environment_begin( __kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack, void **SavedSharedFrame, int32_t *SavedActiveThreads, size_t SharingDataSize, size_t SharingDefaultDataSize, int16_t IsOMPRuntimeInitialized) { DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n"); // If the runtime has been elided, used __shared__ memory for master-worker // data sharing. if (!IsOMPRuntimeInitialized) return (void *)&DataSharingState; DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize); DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize); unsigned WID = getWarpId(); unsigned CurActiveThreads = getActiveThreadsMask(); __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; void *&FrameP = DataSharingState.FramePtr[WID]; int32_t &ActiveT = DataSharingState.ActiveThreads[WID]; DSPRINT0(DSFLAG, "Save current slot/stack values.\n"); // Save the current values. 
*SavedSharedSlot = SlotP; *SavedSharedStack = StackP; *SavedSharedFrame = FrameP; *SavedActiveThreads = ActiveT; DSPRINT(DSFLAG, "Warp ID: %d\n", WID); DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (long long)SlotP); DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (long long)StackP); DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP); DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT); // Only the warp active master needs to grow the stack. if (IsWarpMasterActiveThread()) { // Save the current active threads. ActiveT = CurActiveThreads; // Make sure we use aligned sizes to avoid rematerialization of data. SharingDataSize = AlignVal(SharingDataSize); // FIXME: The default data size can be assumed to be aligned? SharingDefaultDataSize = AlignVal(SharingDefaultDataSize); // Check if we have room for the data in the current slot. const uintptr_t CurrentStartAddress = (uintptr_t)StackP; const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd; const uintptr_t RequiredEndAddress = CurrentStartAddress + (uintptr_t)SharingDataSize; DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize); DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize); DSPRINT(DSFLAG, "Current Start Address %016llx\n", CurrentStartAddress); DSPRINT(DSFLAG, "Current End Address %016llx\n", CurrentEndAddress); DSPRINT(DSFLAG, "Required End Address %016llx\n", RequiredEndAddress); DSPRINT(DSFLAG, "Active Threads %08x\n", ActiveT); // If we require a new slot, allocate it and initialize it (or attempt to // reuse one). Also, set the shared stack and slot pointers to the new // place. If we do not need to grow the stack, just adapt the stack and // frame pointers. if (CurrentEndAddress < RequiredEndAddress) { size_t NewSize = (SharingDataSize > SharingDefaultDataSize) ? SharingDataSize : SharingDefaultDataSize; __kmpc_data_sharing_slot *NewSlot = 0; // Attempt to reuse an existing slot. 
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) { uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd - (uintptr_t)(&ExistingSlot->Data[0]); if (ExistingSlotSize >= NewSize) { DSPRINT(DSFLAG, "Reusing stack slot %016llx\n", (long long)ExistingSlot); NewSlot = ExistingSlot; } else { DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n", (long long)SlotP->Next); free(ExistingSlot); } } if (!NewSlot) { NewSlot = (__kmpc_data_sharing_slot *)malloc( sizeof(__kmpc_data_sharing_slot) + NewSize); DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n", (long long)NewSlot, NewSize); } NewSlot->Next = 0; NewSlot->DataEnd = &NewSlot->Data[NewSize]; SlotP->Next = NewSlot; SlotP = NewSlot; StackP = &NewSlot->Data[SharingDataSize]; FrameP = &NewSlot->Data[0]; } else { // Clean up any old slot that we may still have. The slot producers, do // not eliminate them because that may be used to return data. if (SlotP->Next) { DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n", (long long)SlotP->Next); free(SlotP->Next); SlotP->Next = 0; } FrameP = StackP; StackP = (void *)RequiredEndAddress; } } // FIXME: Need to see the impact of doing it here. __threadfence_block(); DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n"); // All the threads in this warp get the frame they should work with. return FrameP; } EXTERN void __kmpc_data_sharing_environment_end( __kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack, void **SavedSharedFrame, int32_t *SavedActiveThreads, int32_t IsEntryPoint) { DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n"); unsigned WID = getWarpId(); if (IsEntryPoint) { if (IsWarpMasterActiveThread()) { DSPRINT0(DSFLAG, "Doing clean up\n"); // The master thread cleans the saved slot, because this is an environment // only for the master. __kmpc_data_sharing_slot *S = IsMasterThread() ? 
*SavedSharedSlot : DataSharingState.SlotPtr[WID]; if (S->Next) { free(S->Next); S->Next = 0; } } DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n"); return; } int32_t CurActive = getActiveThreadsMask(); // Only the warp master can restore the stack and frame information, and only // if there are no other threads left behind in this environment (i.e. the // warp diverged and returns in different places). This only works if we // assume that threads will converge right after the call site that started // the environment. if (IsWarpMasterActiveThread()) { int32_t &ActiveT = DataSharingState.ActiveThreads[WID]; DSPRINT0(DSFLAG, "Before restoring the stack\n"); // Zero the bits in the mask. If it is still different from zero, then we // have other threads that will return after the current ones. ActiveT &= ~CurActive; DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n", CurActive, ActiveT); if (!ActiveT) { // No other active threads? Great, lets restore the stack. __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; void *&FrameP = DataSharingState.FramePtr[WID]; SlotP = *SavedSharedSlot; StackP = *SavedSharedStack; FrameP = *SavedSharedFrame; ActiveT = *SavedActiveThreads; DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n", (long long)SlotP); DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n", (long long)StackP); DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n", (long long)FrameP); DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT); } } // FIXME: Need to see the impact of doing it here. __threadfence_block(); DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n"); return; } EXTERN void * __kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID, int16_t IsOMPRuntimeInitialized) { DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n"); // If the runtime has been elided, use __shared__ memory for master-worker // data sharing. 
We're reusing the statically allocated data structure // that is used for standard data sharing. if (!IsOMPRuntimeInitialized) return (void *)&DataSharingState; // Get the frame used by the requested thread. unsigned SourceWID = SourceThreadID / WARPSIZE; DSPRINT(DSFLAG, "Source warp: %d\n", SourceWID); void *P = DataSharingState.FramePtr[SourceWID]; DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n"); return P; }
c83b7945c651facc5c1122695c7dccc86220c296.cu
//===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the implementation of data sharing environments/ // //===----------------------------------------------------------------------===// #include "omptarget-nvptx.h" #include <stdio.h> // Number of threads in the CUDA block. __device__ static unsigned getNumThreads() { return blockDim.x; } // Thread ID in the CUDA block __device__ static unsigned getThreadId() { return threadIdx.x; } // Warp ID in the CUDA block __device__ static unsigned getWarpId() { return threadIdx.x / WARPSIZE; } // The CUDA thread ID of the master thread. __device__ static unsigned getMasterThreadId() { unsigned Mask = WARPSIZE - 1; return (getNumThreads() - 1) & (~Mask); } // Find the active threads in the warp - return a mask whose n-th bit is set if // the n-th thread in the warp is active. __device__ static unsigned getActiveThreadsMask() { return __BALLOT_SYNC(0xFFFFFFFF, true); } // Return true if this is the first active thread in the warp. __device__ static bool IsWarpMasterActiveThread() { unsigned long long Mask = getActiveThreadsMask(); unsigned long long ShNum = WARPSIZE - (getThreadId() % WARPSIZE); unsigned long long Sh = Mask << ShNum; return Sh == 0; } // Return true if this is the master thread. __device__ static bool IsMasterThread() { return getMasterThreadId() == getThreadId(); } /// Return the provided size aligned to the size of a pointer. __device__ static size_t AlignVal(size_t Val) { const size_t Align = (size_t)sizeof(void *); if (Val & (Align - 1)) { Val += Align; Val &= ~(Align - 1); } return Val; } #define DSFLAG 0 #define DSFLAG_INIT 0 #define DSPRINT(_flag, _str, _args...) 
\ { \ if (_flag) { \ /*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \ } \ } #define DSPRINT0(_flag, _str) \ { \ if (_flag) { \ /*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \ } \ } // Initialize the shared data structures. This is expected to be called for the // master thread and warp masters. \param RootS: A pointer to the root of the // data sharing stack. \param InitialDataSize: The initial size of the data in // the slot. EXTERN void __kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS, size_t InitialDataSize) { DSPRINT0(DSFLAG_INIT, "Entering __kmpc_initialize_data_sharing_environment\n"); unsigned WID = getWarpId(); DSPRINT(DSFLAG_INIT, "Warp ID: %d\n", WID); omptarget_nvptx_TeamDescr *teamDescr = &omptarget_nvptx_threadPrivateContext->TeamContext(); __kmpc_data_sharing_slot *RootS = teamDescr->RootS(WID); DataSharingState.SlotPtr[WID] = RootS; DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0]; // We don't need to initialize the frame and active threads. 
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", InitialDataSize); DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (long long)RootS); DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n", (long long)RootS->DataEnd); DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n", (long long)RootS->Next); DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n", (long long)DataSharingState.SlotPtr[WID]); DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n", (long long)DataSharingState.StackPtr[WID]); DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n"); } EXTERN void *__kmpc_data_sharing_environment_begin( __kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack, void **SavedSharedFrame, int32_t *SavedActiveThreads, size_t SharingDataSize, size_t SharingDefaultDataSize, int16_t IsOMPRuntimeInitialized) { DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n"); // If the runtime has been elided, used __shared__ memory for master-worker // data sharing. if (!IsOMPRuntimeInitialized) return (void *)&DataSharingState; DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize); DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize); unsigned WID = getWarpId(); unsigned CurActiveThreads = getActiveThreadsMask(); __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; void *&FrameP = DataSharingState.FramePtr[WID]; int32_t &ActiveT = DataSharingState.ActiveThreads[WID]; DSPRINT0(DSFLAG, "Save current slot/stack values.\n"); // Save the current values. 
*SavedSharedSlot = SlotP; *SavedSharedStack = StackP; *SavedSharedFrame = FrameP; *SavedActiveThreads = ActiveT; DSPRINT(DSFLAG, "Warp ID: %d\n", WID); DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (long long)SlotP); DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (long long)StackP); DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP); DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT); // Only the warp active master needs to grow the stack. if (IsWarpMasterActiveThread()) { // Save the current active threads. ActiveT = CurActiveThreads; // Make sure we use aligned sizes to avoid rematerialization of data. SharingDataSize = AlignVal(SharingDataSize); // FIXME: The default data size can be assumed to be aligned? SharingDefaultDataSize = AlignVal(SharingDefaultDataSize); // Check if we have room for the data in the current slot. const uintptr_t CurrentStartAddress = (uintptr_t)StackP; const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd; const uintptr_t RequiredEndAddress = CurrentStartAddress + (uintptr_t)SharingDataSize; DSPRINT(DSFLAG, "Data Size %016llx\n", SharingDataSize); DSPRINT(DSFLAG, "Default Data Size %016llx\n", SharingDefaultDataSize); DSPRINT(DSFLAG, "Current Start Address %016llx\n", CurrentStartAddress); DSPRINT(DSFLAG, "Current End Address %016llx\n", CurrentEndAddress); DSPRINT(DSFLAG, "Required End Address %016llx\n", RequiredEndAddress); DSPRINT(DSFLAG, "Active Threads %08x\n", ActiveT); // If we require a new slot, allocate it and initialize it (or attempt to // reuse one). Also, set the shared stack and slot pointers to the new // place. If we do not need to grow the stack, just adapt the stack and // frame pointers. if (CurrentEndAddress < RequiredEndAddress) { size_t NewSize = (SharingDataSize > SharingDefaultDataSize) ? SharingDataSize : SharingDefaultDataSize; __kmpc_data_sharing_slot *NewSlot = 0; // Attempt to reuse an existing slot. 
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) { uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd - (uintptr_t)(&ExistingSlot->Data[0]); if (ExistingSlotSize >= NewSize) { DSPRINT(DSFLAG, "Reusing stack slot %016llx\n", (long long)ExistingSlot); NewSlot = ExistingSlot; } else { DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n", (long long)SlotP->Next); free(ExistingSlot); } } if (!NewSlot) { NewSlot = (__kmpc_data_sharing_slot *)malloc( sizeof(__kmpc_data_sharing_slot) + NewSize); DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n", (long long)NewSlot, NewSize); } NewSlot->Next = 0; NewSlot->DataEnd = &NewSlot->Data[NewSize]; SlotP->Next = NewSlot; SlotP = NewSlot; StackP = &NewSlot->Data[SharingDataSize]; FrameP = &NewSlot->Data[0]; } else { // Clean up any old slot that we may still have. The slot producers, do // not eliminate them because that may be used to return data. if (SlotP->Next) { DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n", (long long)SlotP->Next); free(SlotP->Next); SlotP->Next = 0; } FrameP = StackP; StackP = (void *)RequiredEndAddress; } } // FIXME: Need to see the impact of doing it here. __threadfence_block(); DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n"); // All the threads in this warp get the frame they should work with. return FrameP; } EXTERN void __kmpc_data_sharing_environment_end( __kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack, void **SavedSharedFrame, int32_t *SavedActiveThreads, int32_t IsEntryPoint) { DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n"); unsigned WID = getWarpId(); if (IsEntryPoint) { if (IsWarpMasterActiveThread()) { DSPRINT0(DSFLAG, "Doing clean up\n"); // The master thread cleans the saved slot, because this is an environment // only for the master. __kmpc_data_sharing_slot *S = IsMasterThread() ? 
*SavedSharedSlot : DataSharingState.SlotPtr[WID]; if (S->Next) { free(S->Next); S->Next = 0; } } DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n"); return; } int32_t CurActive = getActiveThreadsMask(); // Only the warp master can restore the stack and frame information, and only // if there are no other threads left behind in this environment (i.e. the // warp diverged and returns in different places). This only works if we // assume that threads will converge right after the call site that started // the environment. if (IsWarpMasterActiveThread()) { int32_t &ActiveT = DataSharingState.ActiveThreads[WID]; DSPRINT0(DSFLAG, "Before restoring the stack\n"); // Zero the bits in the mask. If it is still different from zero, then we // have other threads that will return after the current ones. ActiveT &= ~CurActive; DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n", CurActive, ActiveT); if (!ActiveT) { // No other active threads? Great, lets restore the stack. __kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID]; void *&StackP = DataSharingState.StackPtr[WID]; void *&FrameP = DataSharingState.FramePtr[WID]; SlotP = *SavedSharedSlot; StackP = *SavedSharedStack; FrameP = *SavedSharedFrame; ActiveT = *SavedActiveThreads; DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n", (long long)SlotP); DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n", (long long)StackP); DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n", (long long)FrameP); DSPRINT(DSFLAG, "Active threads: %08x \n", ActiveT); } } // FIXME: Need to see the impact of doing it here. __threadfence_block(); DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n"); return; } EXTERN void * __kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID, int16_t IsOMPRuntimeInitialized) { DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n"); // If the runtime has been elided, use __shared__ memory for master-worker // data sharing. 
We're reusing the statically allocated data structure // that is used for standard data sharing. if (!IsOMPRuntimeInitialized) return (void *)&DataSharingState; // Get the frame used by the requested thread. unsigned SourceWID = SourceThreadID / WARPSIZE; DSPRINT(DSFLAG, "Source warp: %d\n", SourceWID); void *P = DataSharingState.FramePtr[SourceWID]; DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n"); return P; }
64f0ed1483f83c985454749b8536ecc49010aeef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "head.h" #define tpb 256 extern float *d_t; extern float *d_it; extern float *d_V; extern float *d_dV2; extern float *d_Vnew; extern float *d_m; extern float *d_h; extern float *d_jj; extern float *d_d; extern float *d_f; extern float *d_X; extern float *d_cai; extern float *d_m0; extern float *d_h0; extern float *d_jj0; extern float *d_d0; extern float *d_f0; extern float *d_X0; extern float *d_dVdt; extern float *dcai; __global__ void boundary(float *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x; if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1]; d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void bc(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; hipLaunchKernelGGL(( boundary), dim3(bpg), dim3(tpb), 0, 0, d_V); //hipDeviceSynchronize(); } __global__ void comp_dV2(float *d_V ,float *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); int id = k+(nx+2)+1+(2*i); d_dV2[k] = D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) + (d_V[id+(nx+2)] + d_V[id-(nx+2)] - 2*d_V[id])/(dy*dy)); } } void dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_dV2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2); //hipDeviceSynchronize(); } __device__ void comp_it(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, int I, int i, int k, float *d_t) { //int id = k+nx+2+1+2*j; d_it[k] = 0.0; //comp_ina float gna = 23; float ena = ((R*temp) / frdy)*log(nao / nai); float am = 0.32*(d_V[k+nx+2+1+2*i] + 47.13) / (1 - exp(-0.1*(d_V[k+nx+2+1+2*i] + 47.13))); float bm = 0.08*exp(-d_V[k+nx+2+1+2*i] / 11); float ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*i] < -40.0) { ah = 0.135*exp((80 + d_V[k+nx+2+1+2*i]) / -6.8); bh = 
3.56*exp(0.079*d_V[k+nx+2+1+2*i]) + 310000 * exp(0.35*d_V[k+nx+2+1+2*i]); aj = (-127140 * exp(0.2444*d_V[k+nx+2+1+2*i]) - 0.00003474*exp(-0.04391*d_V[k+nx+2+1+2*i]))* ((d_V[k+nx+2+1+2*i] + 37.78)/(1 + exp(0.311*(d_V[k+nx+2+1+2*i] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1378*(d_V[k+nx+2+1+2*i] + 40.14))); } else { ah = 0; bh = 1 / (0.13*(1 + exp((d_V[k+nx+2+1+2*i] + 10.66) / -11.1))); aj = 0; bj = (0.3*exp(-0.0000002535*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1*(d_V[k+nx+2+1+2*i] + 32))); } float mtau = 1 / (am + bm); float htau = 1 / (ah + bh); float jtau = 1 / (aj + bj); float mss = am*mtau; float hss = ah*htau; float jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*exp(-d_t[k] / mtau); d_h0[k] = hss - (hss - d_h[k])*exp(-d_t[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*exp(-d_t[k] / jtau); d_it[k] += gna*d_m0[k] * d_m0[k] * d_m0[k] * d_h0[k] * d_jj0[k] * (d_V[k+nx+2+1+2*i] - ena); //comp_ical __shared__ float esi[tpb]; __shared__ float isi[tpb]; esi[I] = 7.7 - 13.0287*log(d_cai[k]); float ad = 50 * 0.095*exp(-0.01*(d_V[k+nx+2+1+2*i] - 5)) / (1 + exp(-0.072*(d_V[k+nx+2+1+2*i] - 5))); float bd = 50 * 0.07*exp(-0.017*(d_V[k+nx+2+1+2*i] + 44)) / (1 + exp(0.05*(d_V[k+nx+2+1+2*i] + 44))); float af = 50 * 0.012*exp(-0.008*(d_V[k+nx+2+1+2*i] + 28)) / (1 + exp(0.15*(d_V[k+nx+2+1+2*i] + 28))); float bf = 50 * 0.0065*exp(-0.02*(d_V[k+nx+2+1+2*i] + 30)) / (1 + exp(-0.2*(d_V[k+nx+2+1+2*i] + 30))); float taud = 1 / (ad + bd); float tauf = 1 / (af + bf); float dss = ad*taud; float fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*exp(-d_t[k] / taud); d_f0[k] = fss - (fss - d_f[k])*exp(-d_t[k] / tauf); isi[I] = 0.09*d_d0[k] * d_f0[k] * (d_V[k+nx+2+1+2*i] - esi[I]); dcai[k] = -0.0001*isi[I] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*dt; d_it[k] = d_it[k] + isi[I]; //comp_ik float gk = 0.282*sqrt(ko / 5.4); float ek = ((R*temp) / frdy)*log(ko / ki); //float prnak = 0.01833; //ek = ((R*temp) / frdy)*log((ko + prnak*nao) / (ki + prnak*nai)); float 
ax = 50 * 0.0005*exp(0.083*(d_V[k+nx+2+1+2*i] + 50)) / (1 + exp(0.057*(d_V[k+nx+2+1+2*i] + 50))); float bx = 50 * 0.0013*exp(-0.06*(d_V[k+nx+2+1+2*i] + 20)) / (1 + exp(-0.04*(d_V[k+nx+2+1+2*i] + 20))); float taux = 1 / (ax + bx); float xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*exp(-d_t[k] / taux); float Xi; if (d_V[k+nx+2+1+2*i] > -100) { Xi = 2.837*(exp(0.04*(d_V[k+nx+2+1+2*i] + 77)) - 1)/ ((d_V[k+nx+2+1+2*i] + 77 + 1e-15)*exp(0.04*(d_V[k+nx+2+1+2*i] + 35))); } else { Xi = 1; } d_it[k] += gk*d_X0[k] * Xi*(d_V[k+nx+2+1+2*i] - ek); //comp_ik1 float gk1 = 0.6047*(sqrt(ko / 5.4)); float ek1 = ((R*temp) / frdy)*log(ko / ki); float ak1 = 1.02 / (1 + exp(0.2385*(d_V[k+nx+2+1+2*i] - ek1 - 59.215))); float bk1 = (0.49124*exp(0.08032*(d_V[k+nx+2+1+2*i] - ek1 + 5.476))+ exp(0.06175*(d_V[k+nx+2+1+2*i] - ek1 - 594.31))) /(1 + exp(-0.5143*(d_V[k+nx+2+1+2*i] - ek1 + 4.753))); float K1ss = ak1 / (ak1 + bk1); d_it[k] += gk1*K1ss*(d_V[k+nx+2+1+2*i] - ek1); //comp_ikp float gkp = 0.0183; float ekp = ((R*temp) / frdy)*log(ko / ki); float kp = 1 / (1 + exp((7.488 - d_V[k+nx+2+1+2*i]) / 5.98)); d_it[k] += gkp*kp*(d_V[k+nx+2+1+2*i] - ekp); //comp_ib d_it[k] += 0.03921*(d_V[k+nx+2+1+2*i] + 59.87); } __global__ void comp_dVdt(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); d_t[k] = dt_max; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, k, d_t); d_dVdt[k] = -d_it[k]; } } void dVdt(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_dVdt), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); } __global__ void plane_waves(float *d_dVdt){ int k = threadIdx.x + 
blockIdx.x * blockDim.x; if(k<ny*5){ int i, j, id; i = (int)(k/5); j = k-i*5; id = i*nx+j; d_dVdt[id] = d_dVdt[id] + (-st); } } void stimu(){ int bpg; //int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; hipLaunchKernelGGL(( plane_waves), dim3(bpg), dim3(tpb), 0, 0, d_dVdt); //hipDeviceSynchronize(); } __device__ void gate(float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_X, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, int k){ d_m[k] = d_m0[k]; d_h[k] = d_h0[k]; d_jj[k] = d_jj0[k]; d_d[k] = d_d0[k]; d_f[k] = d_f0[k]; d_X[k] = d_X0[k]; } __global__ void comp_ODE_stim(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<5*ny){ int i = (int)(k/5); int j = k - i*5; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id]) + 0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai d_dVdt[id] = -d_it[id] + (-st); d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } __global__ void comp_ODE(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t, int num){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<(nx-num)*ny){ int i = (int)(k/(nx-num)); int j = k - i*(nx-num) + 
num; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id])+0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai d_dVdt[id] = -d_it[id]; d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } void ODE_stim(){ int bpg; bpg = (5*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_ODE_stim), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); bpg = ((nx-5)*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_ODE), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 5); } void ODE(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_ODE), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 0); } __global__ void Euler(float *d_V, float *d_dV2, float *d_Vnew){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*i] + dt_max/2 *d_dV2[k]; d_V[k+nx+2+1+2*i] = d_Vnew[k]; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( Euler), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew); //hipDeviceSynchronize(); }
64f0ed1483f83c985454749b8536ecc49010aeef.cu
#include "head.h" #define tpb 256 extern float *d_t; extern float *d_it; extern float *d_V; extern float *d_dV2; extern float *d_Vnew; extern float *d_m; extern float *d_h; extern float *d_jj; extern float *d_d; extern float *d_f; extern float *d_X; extern float *d_cai; extern float *d_m0; extern float *d_h0; extern float *d_jj0; extern float *d_d0; extern float *d_f0; extern float *d_X0; extern float *d_dVdt; extern float *dcai; __global__ void boundary(float *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x; if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1]; d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void bc(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; boundary<<<bpg, tpb>>>(d_V); //cudaDeviceSynchronize(); } __global__ void comp_dV2(float *d_V ,float *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); int id = k+(nx+2)+1+(2*i); d_dV2[k] = D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) + (d_V[id+(nx+2)] + d_V[id-(nx+2)] - 2*d_V[id])/(dy*dy)); } } void dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; comp_dV2<<<bpg, tpb>>>(d_V, d_dV2); //cudaDeviceSynchronize(); } __device__ void comp_it(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, int I, int i, int k, float *d_t) { //int id = k+nx+2+1+2*j; d_it[k] = 0.0; //comp_ina float gna = 23; float ena = ((R*temp) / frdy)*log(nao / nai); float am = 0.32*(d_V[k+nx+2+1+2*i] + 47.13) / (1 - exp(-0.1*(d_V[k+nx+2+1+2*i] + 47.13))); float bm = 0.08*exp(-d_V[k+nx+2+1+2*i] / 11); float ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*i] < -40.0) { ah = 0.135*exp((80 + d_V[k+nx+2+1+2*i]) / -6.8); bh = 3.56*exp(0.079*d_V[k+nx+2+1+2*i]) + 310000 * exp(0.35*d_V[k+nx+2+1+2*i]); aj = (-127140 * exp(0.2444*d_V[k+nx+2+1+2*i]) - 
0.00003474*exp(-0.04391*d_V[k+nx+2+1+2*i]))* ((d_V[k+nx+2+1+2*i] + 37.78)/(1 + exp(0.311*(d_V[k+nx+2+1+2*i] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1378*(d_V[k+nx+2+1+2*i] + 40.14))); } else { ah = 0; bh = 1 / (0.13*(1 + exp((d_V[k+nx+2+1+2*i] + 10.66) / -11.1))); aj = 0; bj = (0.3*exp(-0.0000002535*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1*(d_V[k+nx+2+1+2*i] + 32))); } float mtau = 1 / (am + bm); float htau = 1 / (ah + bh); float jtau = 1 / (aj + bj); float mss = am*mtau; float hss = ah*htau; float jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*exp(-d_t[k] / mtau); d_h0[k] = hss - (hss - d_h[k])*exp(-d_t[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*exp(-d_t[k] / jtau); d_it[k] += gna*d_m0[k] * d_m0[k] * d_m0[k] * d_h0[k] * d_jj0[k] * (d_V[k+nx+2+1+2*i] - ena); //comp_ical __shared__ float esi[tpb]; __shared__ float isi[tpb]; esi[I] = 7.7 - 13.0287*log(d_cai[k]); float ad = 50 * 0.095*exp(-0.01*(d_V[k+nx+2+1+2*i] - 5)) / (1 + exp(-0.072*(d_V[k+nx+2+1+2*i] - 5))); float bd = 50 * 0.07*exp(-0.017*(d_V[k+nx+2+1+2*i] + 44)) / (1 + exp(0.05*(d_V[k+nx+2+1+2*i] + 44))); float af = 50 * 0.012*exp(-0.008*(d_V[k+nx+2+1+2*i] + 28)) / (1 + exp(0.15*(d_V[k+nx+2+1+2*i] + 28))); float bf = 50 * 0.0065*exp(-0.02*(d_V[k+nx+2+1+2*i] + 30)) / (1 + exp(-0.2*(d_V[k+nx+2+1+2*i] + 30))); float taud = 1 / (ad + bd); float tauf = 1 / (af + bf); float dss = ad*taud; float fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*exp(-d_t[k] / taud); d_f0[k] = fss - (fss - d_f[k])*exp(-d_t[k] / tauf); isi[I] = 0.09*d_d0[k] * d_f0[k] * (d_V[k+nx+2+1+2*i] - esi[I]); dcai[k] = -0.0001*isi[I] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*dt; d_it[k] = d_it[k] + isi[I]; //comp_ik float gk = 0.282*sqrt(ko / 5.4); float ek = ((R*temp) / frdy)*log(ko / ki); //float prnak = 0.01833; //ek = ((R*temp) / frdy)*log((ko + prnak*nao) / (ki + prnak*nai)); float ax = 50 * 0.0005*exp(0.083*(d_V[k+nx+2+1+2*i] + 50)) / (1 + exp(0.057*(d_V[k+nx+2+1+2*i] + 50))); float bx = 50 * 
0.0013*exp(-0.06*(d_V[k+nx+2+1+2*i] + 20)) / (1 + exp(-0.04*(d_V[k+nx+2+1+2*i] + 20))); float taux = 1 / (ax + bx); float xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*exp(-d_t[k] / taux); float Xi; if (d_V[k+nx+2+1+2*i] > -100) { Xi = 2.837*(exp(0.04*(d_V[k+nx+2+1+2*i] + 77)) - 1)/ ((d_V[k+nx+2+1+2*i] + 77 + 1e-15)*exp(0.04*(d_V[k+nx+2+1+2*i] + 35))); } else { Xi = 1; } d_it[k] += gk*d_X0[k] * Xi*(d_V[k+nx+2+1+2*i] - ek); //comp_ik1 float gk1 = 0.6047*(sqrt(ko / 5.4)); float ek1 = ((R*temp) / frdy)*log(ko / ki); float ak1 = 1.02 / (1 + exp(0.2385*(d_V[k+nx+2+1+2*i] - ek1 - 59.215))); float bk1 = (0.49124*exp(0.08032*(d_V[k+nx+2+1+2*i] - ek1 + 5.476))+ exp(0.06175*(d_V[k+nx+2+1+2*i] - ek1 - 594.31))) /(1 + exp(-0.5143*(d_V[k+nx+2+1+2*i] - ek1 + 4.753))); float K1ss = ak1 / (ak1 + bk1); d_it[k] += gk1*K1ss*(d_V[k+nx+2+1+2*i] - ek1); //comp_ikp float gkp = 0.0183; float ekp = ((R*temp) / frdy)*log(ko / ki); float kp = 1 / (1 + exp((7.488 - d_V[k+nx+2+1+2*i]) / 5.98)); d_it[k] += gkp*kp*(d_V[k+nx+2+1+2*i] - ekp); //comp_ib d_it[k] += 0.03921*(d_V[k+nx+2+1+2*i] + 59.87); } __global__ void comp_dVdt(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); d_t[k] = dt_max; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, k, d_t); d_dVdt[k] = -d_it[k]; } } void dVdt(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_dVdt<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); } __global__ void plane_waves(float *d_dVdt){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<ny*5){ int i, j, id; i = (int)(k/5); j = k-i*5; id = i*nx+j; d_dVdt[id] = d_dVdt[id] + (-st); } } void stimu(){ int bpg; 
//int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; plane_waves<<<bpg, tpb>>>(d_dVdt); //cudaDeviceSynchronize(); } __device__ void gate(float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_X, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, int k){ d_m[k] = d_m0[k]; d_h[k] = d_h0[k]; d_jj[k] = d_jj0[k]; d_d[k] = d_d0[k]; d_f[k] = d_f0[k]; d_X[k] = d_X0[k]; } __global__ void comp_ODE_stim(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<5*ny){ int i = (int)(k/5); int j = k - i*5; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id]) + 0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai d_dVdt[id] = -d_it[id] + (-st); d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } __global__ void comp_ODE(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t, int num){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<(nx-num)*ny){ int i = (int)(k/(nx-num)); int j = k - i*(nx-num) + num; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id])+0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = 
(int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai d_dVdt[id] = -d_it[id]; d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } void ODE_stim(){ int bpg; bpg = (5*ny+tpb-1)/tpb; comp_ODE_stim<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); bpg = ((nx-5)*ny+tpb-1)/tpb; comp_ODE<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 5); } void ODE(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_ODE<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 0); } __global__ void Euler(float *d_V, float *d_dV2, float *d_Vnew){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*i] + dt_max/2 *d_dV2[k]; d_V[k+nx+2+1+2*i] = d_Vnew[k]; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; Euler<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew); //cudaDeviceSynchronize(); }
0600b99e6e872e04fe61a432c99d83078f493820.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_fdividef.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t n = XSIZE*YSIZE; float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float *y = NULL; hipMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_fdividef), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_fdividef), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_fdividef), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0600b99e6e872e04fe61a432c99d83078f493820.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_fdividef.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t n = XSIZE*YSIZE; float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_fdividef<<<gridBlock,threadBlock>>>(n,result,x,y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_fdividef<<<gridBlock,threadBlock>>>(n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_fdividef<<<gridBlock,threadBlock>>>(n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
34259468ac250273095e90e87c3eb2363c395c85.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/block.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" using namespace cv::gpu; typedef unsigned char uchar; typedef unsigned short ushort; ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing namespace cv { namespace gpu { namespace device { namespace imgproc { __device__ __forceinline__ float norm2(const float& v) { return v*v; } __device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; } __device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; } __device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; } template<typename T, typename B> __global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type; const int i = blockDim.y * blockIdx.y + threadIdx.y; const int j = blockDim.x * blockIdx.x + threadIdx.x; if (j >= dst.cols || i >= dst.rows) return; int bsize = search_radius + block_radius; int search_window = 2 * search_radius + 1; float minus_search_window2_inv = -1.f/(search_window * search_window); value_type sum1 = 
VecTraits<value_type>::all(0); float sum2 = 0.f; if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows) { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx)); value_type av = saturate_cast<value_type>(src(i + ty, j + tx)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); /*if (i == 255 && j == 255) printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/ sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x)); sum2 += w; } } else { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src)); value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src)); sum2 += w; } } dst(i, j) = saturate_cast<T>(sum1 / sum2); } template<typename T, template <typename> class B> void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, hipStream_t stream) { dim3 block (32, 8); dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y)); B<T> b(src.rows, src.cols); int block_window = 2 * block_radius + 1; float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn); float noise_mult = minus_h2_inv/(block_window * block_window); cudaSafeCall( hipFuncSetCacheConfig (nlm_kernel<T, B<T> >, hipFuncCachePreferL1) ); 
hipLaunchKernelGGL(( nlm_kernel), dim3(grid), dim3(block), 0, 0, (PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, noise_mult); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template<typename T> void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, hipStream_t stream) { typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, hipStream_t stream); static func_t funcs[] = { nlm_caller<T, BrdReflect101>, nlm_caller<T, BrdReplicate>, nlm_caller<T, BrdConstant>, nlm_caller<T, BrdReflect>, nlm_caller<T, BrdWrap>, }; funcs[borderMode](src, dst, search_radius, block_radius, h, stream); } template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, hipStream_t); } }}} ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing (fast approximate version) namespace cv { namespace gpu { namespace device { namespace imgproc { __device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); } __device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); } __device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); } template <class T> struct FastNonLocalMenas { enum { CTA_SIZE = 128, TILE_COLS = 128, TILE_ROWS = 32, STRIDE = CTA_SIZE }; struct plus { __device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; } }; int search_radius; int block_radius; int search_window; int block_window; 
float minus_h2_inv; FastNonLocalMenas(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2), search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {} PtrStep<T> src; mutable PtrStepi buffer; __device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { dist_sums[index] = 0; for(int tx = 0; tx < block_window; ++tx) col_sums(tx, index) = 0; int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j; int by = i + y - search_radius; int bx = j + x - search_radius; #if 1 for (int tx = -block_radius; tx <= block_radius; ++tx) { int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sum += dist; } col_sums(tx + block_radius, index) = col_sum; } #else for (int ty = -block_radius; ty <= block_radius; ++ty) for (int tx = -block_radius; tx <= block_radius; ++tx) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sums(tx + block_radius, index) += dist; } #endif up_col_sums(j, index) = col_sums(block_window - 1, index); } } __device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j + block_radius; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx)); dist_sums[index] += col_sum - col_sums(first, index); 
col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { int ay = i; int ax = j + block_radius; T a_up = src(ay - block_radius - 1, ax); T a_down = src(ay + block_radius, ax); for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; T b_up = src(by - block_radius - 1, bx); T b_down = src(by + block_radius, bx); int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums, T& dst) const { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type; float weights_sum = 0; sum_type sum = VecTraits<sum_type>::all(0); float bw2_inv = 1.f/(block_window * block_window); int sx = j - search_radius; int sy = i - search_radius; for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; float avg_dist = dist_sums[index] * bw2_inv; float weight = __expf(avg_dist * minus_h2_inv); weights_sum += weight; sum = sum + weight * saturate_cast<sum_type>(src(sy + y, sx + x)); } volatile __shared__ float cta_buffer[CTA_SIZE]; int tid = threadIdx.x; cta_buffer[tid] = weights_sum; __syncthreads(); Block::reduce<CTA_SIZE>(cta_buffer, plus()); weights_sum = cta_buffer[0]; __syncthreads(); for(int n = 0; n < VecTraits<T>::cn; ++n) { cta_buffer[tid] = reinterpret_cast<float*>(&sum)[n]; __syncthreads(); Block::reduce<CTA_SIZE>(cta_buffer, plus()); reinterpret_cast<float*>(&sum)[n] = 
cta_buffer[0]; __syncthreads(); } if (tid == 0) dst = saturate_cast<T>(sum/weights_sum); } __device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const { int tbx = blockIdx.x * TILE_COLS; int tby = blockIdx.y * TILE_ROWS; int tex = ::min(tbx + TILE_COLS, dst.cols); int tey = ::min(tby + TILE_ROWS, dst.rows); PtrStepi col_sums; col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window; col_sums.step = buffer.step; PtrStepi up_col_sums; up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window; up_col_sums.step = buffer.step; extern __shared__ int dist_sums[]; //search_window * search_window int first = 0; for (int i = tby; i < tey; ++i) for (int j = tbx; j < tex; ++j) { __syncthreads(); if (j == tbx) { initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums); first = 0; } else { if (i == tby) shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums); else shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums); first = (first + 1) % block_window; } __syncthreads(); convolve_window(i, j, dist_sums, col_sums, up_col_sums, dst(i, j)); } } }; template<typename T> __global__ void fast_nlm_kernel(const FastNonLocalMenas<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); } void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows) { typedef FastNonLocalMenas<uchar> FNLM; dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); buffer_cols = search_window * search_window * grid.y; buffer_rows = src.cols + block_window * grid.x; } template<typename T> void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer, int search_window, int block_window, float h, hipStream_t stream) { typedef FastNonLocalMenas<T> FNLM; FNLM fnlm(search_window, block_window, h); fnlm.src = (PtrStepSz<T>)src; fnlm.buffer = buffer; dim3 block(FNLM::CTA_SIZE, 1); dim3 grid(divUp(src.cols, FNLM::TILE_COLS), 
divUp(src.rows, FNLM::TILE_ROWS)); int smem = search_window * search_window * sizeof(int); hipLaunchKernelGGL(( fast_nlm_kernel), dim3(grid), dim3(block), smem, 0, fnlm, (PtrStepSz<T>)dst); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); template void nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, hipStream_t); __global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar3 p = lab(y, x); ab(y,x) = make_uchar2(p.y, p.z); l(y,x) = p.x; } } void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, hipStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); hipLaunchKernelGGL(( fnlm_split_kernel), dim3(g), dim3(b), 0, 0, lab, l, ab); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } __global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar2 p = ab(y, x); lab(y, x) = make_uchar3(l(y, x), p.x, p.y); } } void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, hipStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); hipLaunchKernelGGL(( fnlm_merge_kernel), dim3(g), dim3(b), 0, 0, l, ab, lab); cudaSafeCall ( hipGetLastError () ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } } }}} #endif /* CUDA_DISABLER */
34259468ac250273095e90e87c3eb2363c395c85.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or bpied warranties, including, but not limited to, the bpied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "internal_shared.hpp" #include "opencv2/gpu/device/vec_traits.hpp" #include "opencv2/gpu/device/vec_math.hpp" #include "opencv2/gpu/device/block.hpp" #include "opencv2/gpu/device/border_interpolate.hpp" using namespace cv::gpu; typedef unsigned char uchar; typedef unsigned short ushort; ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing namespace cv { namespace gpu { namespace device { namespace imgproc { __device__ __forceinline__ float norm2(const float& v) { return v*v; } __device__ __forceinline__ float norm2(const float2& v) { return v.x*v.x + v.y*v.y; } __device__ __forceinline__ float norm2(const float3& v) { return v.x*v.x + v.y*v.y + v.z*v.z; } __device__ __forceinline__ float norm2(const float4& v) { return v.x*v.x + v.y*v.y + v.z*v.z + v.w*v.w; } template<typename T, typename B> __global__ void nlm_kernel(const PtrStep<T> src, PtrStepSz<T> dst, const B b, int search_radius, int block_radius, float noise_mult) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type value_type; const int i = blockDim.y * blockIdx.y + threadIdx.y; const int j = blockDim.x * blockIdx.x + threadIdx.x; if (j >= dst.cols || i >= dst.rows) return; int bsize = search_radius + block_radius; int search_window = 2 * search_radius + 1; float minus_search_window2_inv = -1.f/(search_window * search_window); value_type sum1 = 
VecTraits<value_type>::all(0); float sum2 = 0.f; if (j - bsize >= 0 && j + bsize < dst.cols && i - bsize >= 0 && i + bsize < dst.rows) { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(src(i + y + ty, j + x + tx)); value_type av = saturate_cast<value_type>(src(i + ty, j + tx)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); /*if (i == 255 && j == 255) printf("%f %f\n", w, dist2 * minus_h2_inv + (x * x + y * y) * minus_search_window2_inv);*/ sum1 = sum1 + w * saturate_cast<value_type>(src(i + y, j + x)); sum2 += w; } } else { for(float y = -search_radius; y <= search_radius; ++y) for(float x = -search_radius; x <= search_radius; ++x) { float dist2 = 0; for(float ty = -block_radius; ty <= block_radius; ++ty) for(float tx = -block_radius; tx <= block_radius; ++tx) { value_type bv = saturate_cast<value_type>(b.at(i + y + ty, j + x + tx, src)); value_type av = saturate_cast<value_type>(b.at(i + ty, j + tx, src)); dist2 += norm2(av - bv); } float w = __expf(dist2 * noise_mult + (x * x + y * y) * minus_search_window2_inv); sum1 = sum1 + w * saturate_cast<value_type>(b.at(i + y, j + x, src)); sum2 += w; } } dst(i, j) = saturate_cast<T>(sum1 / sum2); } template<typename T, template <typename> class B> void nlm_caller(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream) { dim3 block (32, 8); dim3 grid (divUp (src.cols, block.x), divUp (src.rows, block.y)); B<T> b(src.rows, src.cols); int block_window = 2 * block_radius + 1; float minus_h2_inv = -1.f/(h * h * VecTraits<T>::cn); float noise_mult = minus_h2_inv/(block_window * block_window); cudaSafeCall( cudaFuncSetCacheConfig (nlm_kernel<T, B<T> >, cudaFuncCachePreferL1) ); 
nlm_kernel<<<grid, block>>>((PtrStepSz<T>)src, (PtrStepSz<T>)dst, b, search_radius, block_radius, noise_mult); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template<typename T> void nlm_bruteforce_gpu(const PtrStepSzb& src, PtrStepSzb dst, int search_radius, int block_radius, float h, int borderMode, cudaStream_t stream) { typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int search_radius, int block_radius, float h, cudaStream_t stream); static func_t funcs[] = { nlm_caller<T, BrdReflect101>, nlm_caller<T, BrdReplicate>, nlm_caller<T, BrdConstant>, nlm_caller<T, BrdReflect>, nlm_caller<T, BrdWrap>, }; funcs[borderMode](src, dst, search_radius, block_radius, h, stream); } template void nlm_bruteforce_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); template void nlm_bruteforce_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); template void nlm_bruteforce_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, int, int, float, int, cudaStream_t); } }}} ////////////////////////////////////////////////////////////////////////////////// //// Non Local Means Denosing (fast approximate version) namespace cv { namespace gpu { namespace device { namespace imgproc { __device__ __forceinline__ int calcDist(const uchar& a, const uchar& b) { return (a-b)*(a-b); } __device__ __forceinline__ int calcDist(const uchar2& a, const uchar2& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y); } __device__ __forceinline__ int calcDist(const uchar3& a, const uchar3& b) { return (a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y) + (a.z-b.z)*(a.z-b.z); } template <class T> struct FastNonLocalMenas { enum { CTA_SIZE = 128, TILE_COLS = 128, TILE_ROWS = 32, STRIDE = CTA_SIZE }; struct plus { __device__ __forceinline__ float operator()(float v1, float v2) const { return v1 + v2; } }; int search_radius; int block_radius; int search_window; int block_window; float minus_h2_inv; 
FastNonLocalMenas(int search_window_, int block_window_, float h) : search_radius(search_window_/2), block_radius(block_window_/2), search_window(search_window_), block_window(block_window_), minus_h2_inv(-1.f/(h * h * VecTraits<T>::cn)) {} PtrStep<T> src; mutable PtrStepi buffer; __device__ __forceinline__ void initSums_BruteForce(int i, int j, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { dist_sums[index] = 0; for(int tx = 0; tx < block_window; ++tx) col_sums(tx, index) = 0; int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j; int by = i + y - search_radius; int bx = j + x - search_radius; #if 1 for (int tx = -block_radius; tx <= block_radius; ++tx) { int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sum += dist; } col_sums(tx + block_radius, index) = col_sum; } #else for (int ty = -block_radius; ty <= block_radius; ++ty) for (int tx = -block_radius; tx <= block_radius; ++tx) { int dist = calcDist(src(ay + ty, ax + tx), src(by + ty, bx + tx)); dist_sums[index] += dist; col_sums(tx + block_radius, index) += dist; } #endif up_col_sums(j, index) = col_sums(block_window - 1, index); } } __device__ __forceinline__ void shiftRight_FirstRow(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int ay = i; int ax = j + block_radius; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; int col_sum = 0; for (int ty = -block_radius; ty <= block_radius; ++ty) col_sum += calcDist(src(ay + ty, ax), src(by + ty, bx)); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; 
up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void shiftRight_UpSums(int i, int j, int first, int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums) const { int ay = i; int ax = j + block_radius; T a_up = src(ay - block_radius - 1, ax); T a_down = src(ay + block_radius, ax); for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; int by = i + y - search_radius; int bx = j + x - search_radius + block_radius; T b_up = src(by - block_radius - 1, bx); T b_down = src(by + block_radius, bx); int col_sum = up_col_sums(j, index) + calcDist(a_down, b_down) - calcDist(a_up, b_up); dist_sums[index] += col_sum - col_sums(first, index); col_sums(first, index) = col_sum; up_col_sums(j, index) = col_sum; } } __device__ __forceinline__ void convolve_window(int i, int j, const int* dist_sums, PtrStepi& col_sums, PtrStepi& up_col_sums, T& dst) const { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_type; float weights_sum = 0; sum_type sum = VecTraits<sum_type>::all(0); float bw2_inv = 1.f/(block_window * block_window); int sx = j - search_radius; int sy = i - search_radius; for(int index = threadIdx.x; index < search_window * search_window; index += STRIDE) { int y = index / search_window; int x = index - y * search_window; float avg_dist = dist_sums[index] * bw2_inv; float weight = __expf(avg_dist * minus_h2_inv); weights_sum += weight; sum = sum + weight * saturate_cast<sum_type>(src(sy + y, sx + x)); } volatile __shared__ float cta_buffer[CTA_SIZE]; int tid = threadIdx.x; cta_buffer[tid] = weights_sum; __syncthreads(); Block::reduce<CTA_SIZE>(cta_buffer, plus()); weights_sum = cta_buffer[0]; __syncthreads(); for(int n = 0; n < VecTraits<T>::cn; ++n) { cta_buffer[tid] = reinterpret_cast<float*>(&sum)[n]; __syncthreads(); Block::reduce<CTA_SIZE>(cta_buffer, plus()); reinterpret_cast<float*>(&sum)[n] = cta_buffer[0]; __syncthreads(); } if (tid == 0) 
dst = saturate_cast<T>(sum/weights_sum); } __device__ __forceinline__ void operator()(PtrStepSz<T>& dst) const { int tbx = blockIdx.x * TILE_COLS; int tby = blockIdx.y * TILE_ROWS; int tex = ::min(tbx + TILE_COLS, dst.cols); int tey = ::min(tby + TILE_ROWS, dst.rows); PtrStepi col_sums; col_sums.data = buffer.ptr(dst.cols + blockIdx.x * block_window) + blockIdx.y * search_window * search_window; col_sums.step = buffer.step; PtrStepi up_col_sums; up_col_sums.data = buffer.data + blockIdx.y * search_window * search_window; up_col_sums.step = buffer.step; extern __shared__ int dist_sums[]; //search_window * search_window int first = 0; for (int i = tby; i < tey; ++i) for (int j = tbx; j < tex; ++j) { __syncthreads(); if (j == tbx) { initSums_BruteForce(i, j, dist_sums, col_sums, up_col_sums); first = 0; } else { if (i == tby) shiftRight_FirstRow(i, j, first, dist_sums, col_sums, up_col_sums); else shiftRight_UpSums(i, j, first, dist_sums, col_sums, up_col_sums); first = (first + 1) % block_window; } __syncthreads(); convolve_window(i, j, dist_sums, col_sums, up_col_sums, dst(i, j)); } } }; template<typename T> __global__ void fast_nlm_kernel(const FastNonLocalMenas<T> fnlm, PtrStepSz<T> dst) { fnlm(dst); } void nln_fast_get_buffer_size(const PtrStepSzb& src, int search_window, int block_window, int& buffer_cols, int& buffer_rows) { typedef FastNonLocalMenas<uchar> FNLM; dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); buffer_cols = search_window * search_window * grid.y; buffer_rows = src.cols + block_window * grid.x; } template<typename T> void nlm_fast_gpu(const PtrStepSzb& src, PtrStepSzb dst, PtrStepi buffer, int search_window, int block_window, float h, cudaStream_t stream) { typedef FastNonLocalMenas<T> FNLM; FNLM fnlm(search_window, block_window, h); fnlm.src = (PtrStepSz<T>)src; fnlm.buffer = buffer; dim3 block(FNLM::CTA_SIZE, 1); dim3 grid(divUp(src.cols, FNLM::TILE_COLS), divUp(src.rows, FNLM::TILE_ROWS)); int smem = 
search_window * search_window * sizeof(int); fast_nlm_kernel<<<grid, block, smem>>>(fnlm, (PtrStepSz<T>)dst); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void nlm_fast_gpu<uchar>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t); template void nlm_fast_gpu<uchar2>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t); template void nlm_fast_gpu<uchar3>(const PtrStepSzb&, PtrStepSzb, PtrStepi, int, int, float, cudaStream_t); __global__ void fnlm_split_kernel(const PtrStepSz<uchar3> lab, PtrStepb l, PtrStep<uchar2> ab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar3 p = lab(y, x); ab(y,x) = make_uchar2(p.y, p.z); l(y,x) = p.x; } } void fnlm_split_channels(const PtrStepSz<uchar3>& lab, PtrStepb l, PtrStep<uchar2> ab, cudaStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); fnlm_split_kernel<<<g, b>>>(lab, l, ab); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void fnlm_merge_kernel(const PtrStepb l, const PtrStep<uchar2> ab, PtrStepSz<uchar3> lab) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < lab.cols && y < lab.rows) { uchar2 p = ab(y, x); lab(y, x) = make_uchar3(l(y, x), p.x, p.y); } } void fnlm_merge_channels(const PtrStepb& l, const PtrStep<uchar2>& ab, PtrStepSz<uchar3> lab, cudaStream_t stream) { dim3 b(32, 8); dim3 g(divUp(lab.cols, b.x), divUp(lab.rows, b.y)); fnlm_merge_kernel<<<g, b>>>(l, ab, lab); cudaSafeCall ( cudaGetLastError () ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } }}} #endif /* CUDA_DISABLER */
e39d1126bf3fe2aca9453a5d3e8e3f018f24843b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> c d s */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void zmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { extern __shared__ magmaDoubleComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = d_colind [ num_rows * n + row ]; magmaDoubleComplex val = d_val [ num_rows * n + row ]; if( val != 0){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * d_x[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * d_y [ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param num_vecs mama_int_t number of vectors @param nnz_per_row magma_int_t number of elements in the longest row @param alpha magmaDoubleComplex scalar multiplier @param d_val magmaDoubleComplex* array containing values of A in ELL @param d_colind magma_int_t* columnindices of A in ELL @param d_x magmaDoubleComplex* input vector x @param beta magmaDoubleComplex scalar multiplier @param d_y magmaDoubleComplex* input/output vector y @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y ){ dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors hipLaunchKernelGGL(( zmgeelltmv_kernel), dim3(grid), dim3(BLOCK_SIZE), MEM_SIZE , 0, m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y ); return MAGMA_SUCCESS; }
e39d1126bf3fe2aca9453a5d3e8e3f018f24843b.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> c d s */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void zmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y) { extern __shared__ magmaDoubleComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = d_colind [ num_rows * n + row ]; magmaDoubleComplex val = d_val [ num_rows * n + row ]; if( val != 0){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * d_x[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * d_y [ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param num_vecs mama_int_t number of vectors @param nnz_per_row magma_int_t number of elements in the longest row @param alpha magmaDoubleComplex scalar multiplier @param d_val magmaDoubleComplex* array containing values of A in ELL @param d_colind magma_int_t* columnindices of A in ELL @param d_x magmaDoubleComplex* input vector x @param beta magmaDoubleComplex scalar multiplier @param d_y magmaDoubleComplex* input/output vector y @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex *d_val, magma_index_t *d_colind, magmaDoubleComplex *d_x, magmaDoubleComplex beta, magmaDoubleComplex *d_y ){ dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors zmgeelltmv_kernel<<< grid, BLOCK_SIZE, MEM_SIZE >>> ( m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y ); return MAGMA_SUCCESS; }
3cb6736c4c1e9fce4bbb87b52e184481c1efad4a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "setToZeros.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *data = NULL; hipMalloc(&data, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( setToZeros), dim3(gridBlock),dim3(threadBlock), 0, 0, data,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( setToZeros), dim3(gridBlock),dim3(threadBlock), 0, 0, data,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( setToZeros), dim3(gridBlock),dim3(threadBlock), 0, 0, data,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3cb6736c4c1e9fce4bbb87b52e184481c1efad4a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "setToZeros.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *data = NULL; cudaMalloc(&data, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); setToZeros<<<gridBlock,threadBlock>>>(data,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { setToZeros<<<gridBlock,threadBlock>>>(data,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { setToZeros<<<gridBlock,threadBlock>>>(data,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
14ec0731c1fb04ea541b39fd941859a3de897fc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kReciprocal(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1.f / mat[i]; }
14ec0731c1fb04ea541b39fd941859a3de897fc9.cu
#include "includes.h" __global__ void kReciprocal(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1.f / mat[i]; }
bfee0b84e539449c8d89172529f63fd23ef4ae67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdio> #include "vertical_path_aggregation.hpp" #include "path_aggregation_common.hpp" namespace sgm { namespace path_aggregation { static constexpr unsigned int DP_BLOCK_SIZE = 16u; static constexpr unsigned int BLOCK_SIZE = WARP_SIZE * 8u; template <int DIRECTION, unsigned int MAX_DISPARITY> __global__ void aggregate_vertical_path_kernel( uint8_t *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_WARP = WARP_SIZE / SUBGROUP_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; static const unsigned int RIGHT_BUFFER_SIZE = MAX_DISPARITY + PATHS_PER_BLOCK; static const unsigned int RIGHT_BUFFER_ROWS = RIGHT_BUFFER_SIZE / DP_BLOCK_SIZE; static_assert(DIRECTION == 1 || DIRECTION == -1, ""); if(width == 0 || height == 0){ return; } __shared__ feature_type right_buffer[2 * DP_BLOCK_SIZE][RIGHT_BUFFER_ROWS + 1]; DynamicProgramming<DP_BLOCK_SIZE, SUBGROUP_SIZE> dp; const unsigned int warp_id = threadIdx.x / WARP_SIZE; const unsigned int group_id = threadIdx.x % WARP_SIZE / SUBGROUP_SIZE; const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE; const unsigned int shfl_mask = generate_mask<SUBGROUP_SIZE>() << 
(group_id * SUBGROUP_SIZE); const unsigned int x = blockIdx.x * PATHS_PER_BLOCK + warp_id * PATHS_PER_WARP + group_id; const unsigned int right_x0 = blockIdx.x * PATHS_PER_BLOCK; const unsigned int dp_offset = lane_id * DP_BLOCK_SIZE; const unsigned int right0_addr = (right_x0 + PATHS_PER_BLOCK - 1) - x + dp_offset; const unsigned int right0_addr_lo = right0_addr % DP_BLOCK_SIZE; const unsigned int right0_addr_hi = right0_addr / DP_BLOCK_SIZE; for(unsigned int iter = 0; iter < height; ++iter){ const unsigned int y = (DIRECTION > 0 ? iter : height - 1 - iter); // Load left to register feature_type left_value; if(x < width){ left_value = left[x + y * width]; } // Load right to smem for(unsigned int i0 = 0; i0 < RIGHT_BUFFER_SIZE; i0 += BLOCK_SIZE){ const unsigned int i = i0 + threadIdx.x; if(i < RIGHT_BUFFER_SIZE){ const int right_x = static_cast<int>(right_x0 + PATHS_PER_BLOCK - 1 - i - min_disp); feature_type right_value = 0; if(0 <= right_x && right_x < static_cast<int>(width)){ right_value = right[right_x + y * width]; } const unsigned int lo = i % DP_BLOCK_SIZE; const unsigned int hi = i / DP_BLOCK_SIZE; right_buffer[lo][hi] = right_value; if(hi > 0){ right_buffer[lo + DP_BLOCK_SIZE][hi - 1] = right_value; } } } __syncthreads(); // Compute if(x < width){ feature_type right_values[DP_BLOCK_SIZE]; for(unsigned int j = 0; j < DP_BLOCK_SIZE; ++j){ right_values[j] = right_buffer[right0_addr_lo + j][right0_addr_hi]; } uint32_t local_costs[DP_BLOCK_SIZE]; for(unsigned int j = 0; j < DP_BLOCK_SIZE; ++j){ local_costs[j] = __popc(left_value ^ right_values[j]); } dp.update(local_costs, p1, p2, shfl_mask); store_uint8_vector<DP_BLOCK_SIZE>( &dest[dp_offset + x * MAX_DISPARITY + y * MAX_DISPARITY * width], dp.dp); } __syncthreads(); } } template <unsigned int MAX_DISPARITY> void enqueue_aggregate_up2down_path( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, hipStream_t stream) { 
static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const int gdim = (width + PATHS_PER_BLOCK - 1) / PATHS_PER_BLOCK; const int bdim = BLOCK_SIZE; hipLaunchKernelGGL(( aggregate_vertical_path_kernel<1, MAX_DISPARITY>), dim3(gdim), dim3(bdim), 0, stream, dest, left, right, width, height, p1, p2, min_disp); } template <unsigned int MAX_DISPARITY> void enqueue_aggregate_down2up_path( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, hipStream_t stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const int gdim = (width + PATHS_PER_BLOCK - 1) / PATHS_PER_BLOCK; const int bdim = BLOCK_SIZE; hipLaunchKernelGGL(( aggregate_vertical_path_kernel<-1, MAX_DISPARITY>), dim3(gdim), dim3(bdim), 0, stream, dest, left, right, width, height, p1, p2, min_disp); } template void enqueue_aggregate_up2down_path<64u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, hipStream_t stream); template void enqueue_aggregate_up2down_path<128u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, hipStream_t stream); template void enqueue_aggregate_up2down_path<256u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, hipStream_t stream); template void enqueue_aggregate_down2up_path<64u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, hipStream_t stream); template void enqueue_aggregate_down2up_path<128u>( cost_type *dest, const feature_type *left, const 
feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, hipStream_t stream); template void enqueue_aggregate_down2up_path<256u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, hipStream_t stream); } }
bfee0b84e539449c8d89172529f63fd23ef4ae67.cu
/* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdio> #include "vertical_path_aggregation.hpp" #include "path_aggregation_common.hpp" namespace sgm { namespace path_aggregation { static constexpr unsigned int DP_BLOCK_SIZE = 16u; static constexpr unsigned int BLOCK_SIZE = WARP_SIZE * 8u; template <int DIRECTION, unsigned int MAX_DISPARITY> __global__ void aggregate_vertical_path_kernel( uint8_t *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_WARP = WARP_SIZE / SUBGROUP_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; static const unsigned int RIGHT_BUFFER_SIZE = MAX_DISPARITY + PATHS_PER_BLOCK; static const unsigned int RIGHT_BUFFER_ROWS = RIGHT_BUFFER_SIZE / DP_BLOCK_SIZE; static_assert(DIRECTION == 1 || DIRECTION == -1, ""); if(width == 0 || height == 0){ return; } __shared__ feature_type right_buffer[2 * DP_BLOCK_SIZE][RIGHT_BUFFER_ROWS + 1]; DynamicProgramming<DP_BLOCK_SIZE, SUBGROUP_SIZE> dp; const unsigned int warp_id = threadIdx.x / WARP_SIZE; const unsigned int group_id = threadIdx.x % WARP_SIZE / SUBGROUP_SIZE; const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE; const unsigned int shfl_mask = generate_mask<SUBGROUP_SIZE>() << (group_id * SUBGROUP_SIZE); const unsigned int x = blockIdx.x * PATHS_PER_BLOCK + warp_id * 
PATHS_PER_WARP + group_id; const unsigned int right_x0 = blockIdx.x * PATHS_PER_BLOCK; const unsigned int dp_offset = lane_id * DP_BLOCK_SIZE; const unsigned int right0_addr = (right_x0 + PATHS_PER_BLOCK - 1) - x + dp_offset; const unsigned int right0_addr_lo = right0_addr % DP_BLOCK_SIZE; const unsigned int right0_addr_hi = right0_addr / DP_BLOCK_SIZE; for(unsigned int iter = 0; iter < height; ++iter){ const unsigned int y = (DIRECTION > 0 ? iter : height - 1 - iter); // Load left to register feature_type left_value; if(x < width){ left_value = left[x + y * width]; } // Load right to smem for(unsigned int i0 = 0; i0 < RIGHT_BUFFER_SIZE; i0 += BLOCK_SIZE){ const unsigned int i = i0 + threadIdx.x; if(i < RIGHT_BUFFER_SIZE){ const int right_x = static_cast<int>(right_x0 + PATHS_PER_BLOCK - 1 - i - min_disp); feature_type right_value = 0; if(0 <= right_x && right_x < static_cast<int>(width)){ right_value = right[right_x + y * width]; } const unsigned int lo = i % DP_BLOCK_SIZE; const unsigned int hi = i / DP_BLOCK_SIZE; right_buffer[lo][hi] = right_value; if(hi > 0){ right_buffer[lo + DP_BLOCK_SIZE][hi - 1] = right_value; } } } __syncthreads(); // Compute if(x < width){ feature_type right_values[DP_BLOCK_SIZE]; for(unsigned int j = 0; j < DP_BLOCK_SIZE; ++j){ right_values[j] = right_buffer[right0_addr_lo + j][right0_addr_hi]; } uint32_t local_costs[DP_BLOCK_SIZE]; for(unsigned int j = 0; j < DP_BLOCK_SIZE; ++j){ local_costs[j] = __popc(left_value ^ right_values[j]); } dp.update(local_costs, p1, p2, shfl_mask); store_uint8_vector<DP_BLOCK_SIZE>( &dest[dp_offset + x * MAX_DISPARITY + y * MAX_DISPARITY * width], dp.dp); } __syncthreads(); } } template <unsigned int MAX_DISPARITY> void enqueue_aggregate_up2down_path( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, cudaStream_t stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned 
int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const int gdim = (width + PATHS_PER_BLOCK - 1) / PATHS_PER_BLOCK; const int bdim = BLOCK_SIZE; aggregate_vertical_path_kernel<1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( dest, left, right, width, height, p1, p2, min_disp); } template <unsigned int MAX_DISPARITY> void enqueue_aggregate_down2up_path( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, cudaStream_t stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const int gdim = (width + PATHS_PER_BLOCK - 1) / PATHS_PER_BLOCK; const int bdim = BLOCK_SIZE; aggregate_vertical_path_kernel<-1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( dest, left, right, width, height, p1, p2, min_disp); } template void enqueue_aggregate_up2down_path<64u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, cudaStream_t stream); template void enqueue_aggregate_up2down_path<128u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, cudaStream_t stream); template void enqueue_aggregate_up2down_path<256u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, cudaStream_t stream); template void enqueue_aggregate_down2up_path<64u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, cudaStream_t stream); template void enqueue_aggregate_down2up_path<128u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, cudaStream_t stream); template void 
enqueue_aggregate_down2up_path<256u>( cost_type *dest, const feature_type *left, const feature_type *right, int width, int height, unsigned int p1, unsigned int p2, int min_disp, cudaStream_t stream); } }
dabe7e4610b1a8ee9c1e652f7cca2df3c4c88efc.hip
// !!! This is a file automatically generated by hipify!!! #include <string.h> #include "aes.h" #include <time.h> #include <stdio.h> #include <stdint.h> #include <inttypes.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> __constant__ uint8_t sbox_d[256]= { //0 1 2 3 4 5 6 7 8 9 A B C D E F 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; __constant__ uint8_t rsbox_d[256] = { 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 
0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d }; __constant__ uint8_t Rcon_d[11] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; __constant__ int Nb_d = 4; __constant__ int Nr_d = 14; __constant__ int Nk_d = 8; __constant__ uint32_t ek[60]; // The number of columns comprising a state in AES. This is a constant in AES. 
Value=4 #define Nb 4 #if defined(AES256) && (AES256 == 1) #define Nk 8 #define Nr 14 #endif typedef uint8_t state_t[4][4]; static const uint8_t sbox[256] = { //0 1 2 3 4 5 6 7 8 9 A B C D E F 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; const uint8_t Rcon[11] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; #define getSBoxValue(num) (sbox[(num)]); #define device_getSBoxValue(num) (sbox_d[(num)]); inline void cudaDevAssist(hipError_t code, int line, bool abort=true) { if (code != 
hipSuccess) { fprintf(stderr,"cudaDevAssistant: %s %d\n", hipGetErrorString(code), line); if (abort) exit(code); } } static void KeyExpansion(uint8_t* RoundKey, const uint8_t* Key) { unsigned i, j, k; uint8_t tempa[4]; // Used for the column/row operations // The first round key is the key itself. for (i = 0; i < Nk; ++i) { RoundKey[(i * 4) + 0] = Key[(i * 4) + 0]; RoundKey[(i * 4) + 1] = Key[(i * 4) + 1]; RoundKey[(i * 4) + 2] = Key[(i * 4) + 2]; RoundKey[(i * 4) + 3] = Key[(i * 4) + 3]; } // All other round keys are found from the previous round keys. for (i = Nk; i < Nb * (Nr + 1); ++i) { { k = (i - 1) * 4; tempa[0]=RoundKey[k + 0]; tempa[1]=RoundKey[k + 1]; tempa[2]=RoundKey[k + 2]; tempa[3]=RoundKey[k + 3]; } if (i % Nk == 0) { // This function shifts the 4 bytes in a word to the left once. // [a0,a1,a2,a3] becomes [a1,a2,a3,a0] // Function RotWord() { const uint8_t u8tmp = tempa[0]; tempa[0] = tempa[1]; tempa[1] = tempa[2]; tempa[2] = tempa[3]; tempa[3] = u8tmp; } // SubWord() is a function that takes a four-byte input word and // applies the S-box to each of the four bytes to produce an output word. 
// Function Subword() { tempa[0] = getSBoxValue(tempa[0]); tempa[1] = getSBoxValue(tempa[1]); tempa[2] = getSBoxValue(tempa[2]); tempa[3] = getSBoxValue(tempa[3]); } tempa[0] = tempa[0] ^ Rcon[i/Nk]; } #if defined(AES256) && (AES256 == 1) if (i % Nk == 4) { // Function Subword() { tempa[0] = getSBoxValue(tempa[0]); tempa[1] = getSBoxValue(tempa[1]); tempa[2] = getSBoxValue(tempa[2]); tempa[3] = getSBoxValue(tempa[3]); } } #endif j = i * 4; k=(i - Nk) * 4; RoundKey[j + 0] = RoundKey[k + 0] ^ tempa[0]; RoundKey[j + 1] = RoundKey[k + 1] ^ tempa[1]; RoundKey[j + 2] = RoundKey[k + 2] ^ tempa[2]; RoundKey[j + 3] = RoundKey[k + 3] ^ tempa[3]; } } #if (defined(CTR) && (CTR == 1)) void AES_CTR_iv(struct AES_ctx* ctx, const uint8_t* key, const uint8_t* iv) { KeyExpansion(ctx->RoundKey, key); memcpy (ctx->Iv, iv, AES_BLOCKLEN); } #endif // This function adds the round key to state. // The round key is added to the state by an XOR function. __device__ void AddRoundKey(uint8_t round, state_t* myState, const uint8_t* RoundKey) { uint8_t i,j; //state_t *devState = (state_t*)cipher; for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { //(cipher)[i*4+j] ^= RoundKey[(round * Nb_d * 4) + (i * Nb_d) + j]; (*myState)[i][j] ^= RoundKey[(round * Nb_d * 4) + (i * Nb_d) + j]; //(cipher)[i*4+j] = 'c'; } } } __device__ void SubBytes(state_t* myState) { uint8_t i, j; for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { //(*devState)[j][i] = getSBoxValue((*devState)[j][i]); (*myState)[j][i] = sbox_d[(*myState)[j][i]]; } } } // The ShiftRows() function shifts the rows in the state to the left. // Each row is shifted with different offset. // Offset = Row number. So the first row is not shifted. 
__device__ void ShiftRows(state_t* myState) { uint8_t temp; // Rotate first row 1 columns to left temp = (*myState)[0][1]; (*myState)[0][1] = (*myState)[1][1]; (*myState)[1][1] = (*myState)[2][1]; (*myState)[2][1] = (*myState)[3][1]; (*myState)[3][1] = temp; // Rotate second row 2 columns to left temp = (*myState)[0][2]; (*myState)[0][2] = (*myState)[2][2]; (*myState)[2][2] = temp; temp = (*myState)[1][2]; (*myState)[1][2] = (*myState)[3][2]; (*myState)[3][2] = temp; // Rotate third row 3 columns to left temp = (*myState)[0][3]; (*myState)[0][3] = (*myState)[3][3]; (*myState)[3][3] = (*myState)[2][3]; (*myState)[2][3] = (*myState)[1][3]; (*myState)[1][3] = temp; } __device__ uint8_t xtime(uint8_t x) { return ((x<<1) ^ (((x>>7) & 1) * 0x1b)); } // MixColumns function mixes the columns of the state matrix __device__ void MixColumns(state_t* myState) { uint8_t i; uint8_t Tmp, Tm, t; for (i = 0; i < 4; ++i) { t = (*myState)[i][0]; Tmp = (*myState)[i][0] ^ (*myState)[i][1] ^ (*myState)[i][2] ^ (*myState)[i][3] ; Tm = (*myState)[i][0] ^ (*myState)[i][1] ; Tm = xtime(Tm); (*myState)[i][0] ^= Tm ^ Tmp ; Tm = (*myState)[i][1] ^ (*myState)[i][2] ; Tm = xtime(Tm); (*myState)[i][1] ^= Tm ^ Tmp ; Tm = (*myState)[i][2] ^ (*myState)[i][3] ; Tm = xtime(Tm); (*myState)[i][2] ^= Tm ^ Tmp ; Tm = (*myState)[i][3] ^ t ; Tm = xtime(Tm); (*myState)[i][3] ^= Tm ^ Tmp ; } } // GPUCipher is the main function that encrypts the PlainText. __global__ void GPUCipher(state_t* devState, const uint8_t* RoundKey, uint8_t* plain_text_d, state_t* myIv, int count) // HT { int id = threadIdx.x; uint8_t round = 0; unsigned i; //uint8_t *myIv= (uint8_t *) (devState); // HT //state_t *myState = (state_t *) (myIv); // HT // for(int x = 0; x < 4; x++){ // for(int y = 0; y < 4; y++){ // (*myState)[x][y] = (*devState)[x][y]; // HT // //printf("My state: %d " ,myState[x][y]); // } // } AddRoundKey(0, devState, RoundKey); //devState -> myState for (round = 1; ; ++round) { SubBytes(devState); // getSBoxValue !!! 
// HT ShiftRows(devState); // HT if (round == Nr_d) { break; } MixColumns(devState); // HT AddRoundKey(round, devState, RoundKey); // HT } //Add round key to last round AddRoundKey(Nr_d, devState, RoundKey); // HT for(int a=0; a < id; ++a){ for ( i = 0 ; i < 16; ++i) { /* inc will overflow */ if (((uint8_t *)myIv)[i] == 255) { ((uint8_t *)myIv)[i] = 0; continue; } //printf("%d", (int*)myIv[i]); ((uint8_t *)myIv)[i] += 1; break; } plain_text_d[(id * 16) + i] = plain_text_d[(id * 16) + i] ^ ((uint8_t *)devState)[i]; // //printf("Plaint text: %s ",(char*)plain_text_d); } } static void AddRoundKeyCPU(uint8_t round, state_t* state, const uint8_t* RoundKey) { uint8_t i,j; for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { (*state)[i][j] ^= RoundKey[(round * Nb * 4) + (i * Nb) + j]; } } } static void SubBytesCPU(state_t* state) { uint8_t i, j; for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { // (*state)[j][i] = getSBoxValue((*state)[j][i]); (*state)[j][i] = sbox[((*state)[j][i])]; } } } static void ShiftRowsCPU(state_t* state) { uint8_t temp; // Rotate first row 1 columns to left temp = (*state)[0][1]; (*state)[0][1] = (*state)[1][1]; (*state)[1][1] = (*state)[2][1]; (*state)[2][1] = (*state)[3][1]; (*state)[3][1] = temp; // Rotate second row 2 columns to left temp = (*state)[0][2]; (*state)[0][2] = (*state)[2][2]; (*state)[2][2] = temp; temp = (*state)[1][2]; (*state)[1][2] = (*state)[3][2]; (*state)[3][2] = temp; // Rotate third row 3 columns to left temp = (*state)[0][3]; (*state)[0][3] = (*state)[3][3]; (*state)[3][3] = (*state)[2][3]; (*state)[2][3] = (*state)[1][3]; (*state)[1][3] = temp; } static uint8_t xtimeCPU(uint8_t x) { return ((x<<1) ^ (((x>>7) & 1) * 0x1b)); } static void MixColumnsCPU(state_t* state) { uint8_t i; uint8_t Tmp, Tm, t; for (i = 0; i < 4; ++i) { t = (*state)[i][0]; Tmp = (*state)[i][0] ^ (*state)[i][1] ^ (*state)[i][2] ^ (*state)[i][3] ; Tm = (*state)[i][0] ^ (*state)[i][1] ; Tm = xtimeCPU(Tm); (*state)[i][0] ^= Tm ^ Tmp ; Tm = 
(*state)[i][1] ^ (*state)[i][2] ; Tm = xtimeCPU(Tm); (*state)[i][1] ^= Tm ^ Tmp ; Tm = (*state)[i][2] ^ (*state)[i][3] ; Tm = xtimeCPU(Tm); (*state)[i][2] ^= Tm ^ Tmp ; Tm = (*state)[i][3] ^ t ; Tm = xtimeCPU(Tm); (*state)[i][3] ^= Tm ^ Tmp ; } } static void CPUCipher(state_t* state, const uint8_t* RoundKey) { uint8_t round = 0; // Add the First round key to the state before starting the rounds. AddRoundKeyCPU(0, state, RoundKey); // There will be Nr rounds. // The first Nr-1 rounds are identical. // These Nr rounds are executed in the loop below. // Last one without MixColumns() for (round = 1; ; ++round) { SubBytesCPU(state); ShiftRowsCPU(state); if (round == Nr) { break; } MixColumnsCPU(state); AddRoundKeyCPU(round, state, RoundKey); } // Add round key to last round AddRoundKeyCPU(Nr, state, RoundKey); } #if defined(CTR) && (CTR == 1) /* Symmetrical operation: same function for encrypting as for decrypting. */ void AES_CTR_encryption(struct AES_ctx* ctx, uint8_t* buf, uint32_t length, int block_count , int count) { uint8_t buffer[AES_BLOCKLEN]; state_t *devState = NULL; uint8_t *roundKey_d = NULL; uint8_t *plain_text_d = NULL; state_t *myIv = NULL; // HT uint8_t *plain_text_h = NULL; uint8_t *buffer2; //uint8_t *buffer_d = NULL; unsigned i; int bi; if(count > 524288){ //GPU Encryption for (i = 0, bi = AES_BLOCKLEN; i < length; ++i, ++bi) { if (bi == AES_BLOCKLEN) /* we need to regen xor compliment in buffer */ { //printf("\nENC: %s",(char*) buf); memcpy(buffer, ctx->Iv, AES_BLOCKLEN); //printf("\nbuffer: %s",(char*) buffer); hipSetDevice(1); // cudaDevAssist(hipMemcpyToSymbol(Nk_d, &Nk, sizeof(int), 0, hipMemcpyHostToDevice), 535, true); // cudaDevAssist(hipMemcpyToSymbol(Nr_d, &Nr, sizeof(int), 0, hipMemcpyHostToDevice), 543, true); // cudaDevAssist(hipMemcpyToSymbol(Nb_d, &Nb, sizeof(int), 0, hipMemcpyHostToDevice), 903, true); // cudaDevAssist(hipMemcpyToSymbol(sbox_d, &sbox, 256*sizeof(uint8_t), 0, hipMemcpyHostToDevice), 920, true); // 
cudaDevAssist(hipMemcpyToSymbol(rsbox_d, &rsbox, 256*sizeof(uint8_t), 0, hipMemcpyHostToDevice), 921, true); // cudaDevAssist(hipMemcpyToSymbol(Rcon_d, &Rcon, 11*sizeof(uint8_t), 0, hipMemcpyHostToDevice), 922, true); //hipDeviceSynchronize(); cudaDevAssist(hipMalloc((void**)&devState, 16*sizeof(uint8_t)), 452, true); cudaDevAssist(hipMalloc((void**)&roundKey_d, 240*sizeof(uint8_t)), 453, true); cudaDevAssist(hipMalloc((void**)&plain_text_d, count*sizeof(uint8_t)),446, true); cudaDevAssist(hipMalloc((void**)&myIv, 16*sizeof(uint8_t)),447, true); // Time starting cudaDevAssist(hipMemcpy(devState, &buffer, 16*sizeof(uint8_t), hipMemcpyHostToDevice), 455, true); cudaDevAssist(hipMemcpy(myIv, &buffer, 16*sizeof(uint8_t), hipMemcpyHostToDevice), 455, true); cudaDevAssist(hipMemcpy(roundKey_d, ctx->RoundKey, 240*sizeof(uint8_t), hipMemcpyHostToDevice), 456, true); cudaDevAssist(hipMemcpy(plain_text_d, buf, count*sizeof(uint8_t), hipMemcpyHostToDevice), 457, true); //printf("\nENC2: %s",(char*)roundKey_d); //cudaDevAssist(hipMemcpy(buffer_d, buffer, textLength*sizeof(uint8_t), hipMemcpyHostToDevice), 457, true); cudaDevAssist(hipDeviceSynchronize(), 268, true); //cudaCipher<<<1,1>>>(devState,roundKey_d,buffer_d); hipLaunchKernelGGL(( GPUCipher), dim3(1),dim3(block_count), 0, 0, devState,roundKey_d, plain_text_d,myIv,count); buffer2 = (uint8_t*)malloc(16*sizeof(uint8_t)); plain_text_h = (uint8_t*)malloc(count*sizeof(uint8_t)); cudaDevAssist(hipMemcpy(buffer2, devState, 16*sizeof(uint8_t), hipMemcpyDeviceToHost), 462, true); //cudaDevAssist(hipMemcpy(plain_text_h, plain_text_d, count*sizeof(uint8_t), hipMemcpyDeviceToHost), 463, true); //Time ending /* Increment Iv and handle overflow */ // for (bi = (AES_BLOCKLEN - 1); bi >= 0; --bi) //nce hesapla sonra gnder. 
// { // /* inc will overflow */ // if (ctx->Iv[bi] == 255) // { // ctx->Iv[bi] = 0; // continue; // } // ctx->Iv[bi] += 1; // break; // } // bi = 0; } //buf[i] = (buf[i] ^ buffer2[bi]); } hipFree(devState); hipFree(roundKey_d); hipFree(plain_text_d); hipFree(myIv); memcpy(buf, plain_text_h,count); } else{ //CPU Encryption for (i = 0, bi = AES_BLOCKLEN; i < length; ++i, ++bi) { if (bi == AES_BLOCKLEN) /* we need to regen xor compliment in buffer */ { memcpy(buffer, ctx->Iv, AES_BLOCKLEN); CPUCipher((state_t*)buffer,ctx->RoundKey); /* Increment Iv and handle overflow */ for (bi = (AES_BLOCKLEN - 1); bi >= 0; --bi) { /* inc will overflow */ if (ctx->Iv[bi] == 255) { ctx->Iv[bi] = 0; continue; } ctx->Iv[bi] += 1; // 12A3 + 1 12A4 + 1 break; } bi = 0; } buf[i] = (buf[i] ^ buffer[bi]); // buf = plain text, buffer = Iv } } } #endif // #if defined(CTR) && (CTR == 1) int main(int argc, const char * argv[]) { struct AES_ctx ctx; clock_t c_start, c_stop; uint8_t iv[16] = { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }; FILE *infile, *outfile, *keyfile; printf("Enter the path of input file"); scanf("%s", argv[0]); printf("Enter the path of 32-bit key file"); scanf("%s", argv[2]); // if(size % 16 != 0){ // int addition_part = 16 - (size % 16); // in = (uint8_t*)realloc(in,5*sizeof(int)); // for (int i = size; i <= size + addition_part; ++i) // in[i+1] = 0; // } infile = fopen(argv[0], "r"); fseek(infile, 0, SEEK_END); int size = ftell(infile); fseek(infile, 0, SEEK_SET); uint8_t in[size]; int count = 0; for(int i = 0; i<size; ++i){ fread(&in, sizeof(char), size, infile); count++; } fclose(infile); printf("\nData read from file: %s\n", in); keyfile = fopen(argv[2], "r"); uint8_t key[32]; fread(&key, sizeof(char), 32, keyfile); fclose(keyfile); printf("\nKeyfile: %s \n", key); int block_count = (count / 16) + (count % 16 == 0 ? 0 : 1); // 16 - count%16 kadar 0 eklenicek. 
printf("File size: %d bytes\n", size); outfile = fopen("/home/emre/cuda-workspace/AES256_CTR/src/output.txt", "w"); // /home/emre/Desktop/Test_files/1kb.txt /home/emre/cuda-workspace/AES256_CTR/src/key.txt int breaking_point = 524288; //This is the breaking point of our project. If file size is less than 512 kb, it runs on CPU, if it is larger than 512 kb, it runs on GPU. if (count >= breaking_point){ printf("GPU initiliaze\n"); printf("Data read from file: %s\n", in); AES_CTR_iv(&ctx, key, iv); c_start = clock(); printf("Elements read: %d", count); printf("\nBlock Count: %d", block_count); AES_CTR_encryption(&ctx, in, strlen((char*)in),block_count,count); printf("\nENC: %s",(char*) in); fwrite(&in, sizeof(char), count, outfile); c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000; printf("\nDone - Time taken on GPU: %f ms\n", diff); } else{ printf("CPU initiliaze\n"); //printf("keyfile: %s\n", key); printf("Elements read: %d\n", count); AES_CTR_iv(&ctx, key, iv); uint8_t Input[256]; //uint8_t in2[size]; printf("\nEnc:"); c_start = clock(); for(int i = 256, a=0; i<256+size;i+=256,a+=256){ memcpy(Input, in+a,256); AES_CTR_encryption(&ctx, Input, strlen((char*)Input),block_count,count); printf("%s\n",(char*) Input); // don't use this string as an input fwrite(&Input, sizeof(char), 256, outfile); } c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000; printf("\nDone - Time taken on CPU: %f ms\n", diff); } fclose(outfile); return 0; }
dabe7e4610b1a8ee9c1e652f7cca2df3c4c88efc.cu
#include <string.h> #include "aes.h" #include <time.h> #include <stdio.h> #include <stdint.h> #include <inttypes.h> #include <cuda_runtime.h> #include <cuda.h> __constant__ uint8_t sbox_d[256]= { //0 1 2 3 4 5 6 7 8 9 A B C D E F 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; __constant__ uint8_t rsbox_d[256] = { 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d }; __constant__ uint8_t Rcon_d[11] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; __constant__ int Nb_d = 4; __constant__ int Nr_d = 14; __constant__ int Nk_d = 8; __constant__ uint32_t ek[60]; // The number of columns comprising a state in AES. This is a constant in AES. 
Value=4 #define Nb 4 #if defined(AES256) && (AES256 == 1) #define Nk 8 #define Nr 14 #endif typedef uint8_t state_t[4][4]; static const uint8_t sbox[256] = { //0 1 2 3 4 5 6 7 8 9 A B C D E F 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; const uint8_t Rcon[11] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 }; #define getSBoxValue(num) (sbox[(num)]); #define device_getSBoxValue(num) (sbox_d[(num)]); inline void cudaDevAssist(cudaError_t code, int line, bool abort=true) { if (code 
!= cudaSuccess) { fprintf(stderr,"cudaDevAssistant: %s %d\n", cudaGetErrorString(code), line); if (abort) exit(code); } } static void KeyExpansion(uint8_t* RoundKey, const uint8_t* Key) { unsigned i, j, k; uint8_t tempa[4]; // Used for the column/row operations // The first round key is the key itself. for (i = 0; i < Nk; ++i) { RoundKey[(i * 4) + 0] = Key[(i * 4) + 0]; RoundKey[(i * 4) + 1] = Key[(i * 4) + 1]; RoundKey[(i * 4) + 2] = Key[(i * 4) + 2]; RoundKey[(i * 4) + 3] = Key[(i * 4) + 3]; } // All other round keys are found from the previous round keys. for (i = Nk; i < Nb * (Nr + 1); ++i) { { k = (i - 1) * 4; tempa[0]=RoundKey[k + 0]; tempa[1]=RoundKey[k + 1]; tempa[2]=RoundKey[k + 2]; tempa[3]=RoundKey[k + 3]; } if (i % Nk == 0) { // This function shifts the 4 bytes in a word to the left once. // [a0,a1,a2,a3] becomes [a1,a2,a3,a0] // Function RotWord() { const uint8_t u8tmp = tempa[0]; tempa[0] = tempa[1]; tempa[1] = tempa[2]; tempa[2] = tempa[3]; tempa[3] = u8tmp; } // SubWord() is a function that takes a four-byte input word and // applies the S-box to each of the four bytes to produce an output word. 
// Function Subword() { tempa[0] = getSBoxValue(tempa[0]); tempa[1] = getSBoxValue(tempa[1]); tempa[2] = getSBoxValue(tempa[2]); tempa[3] = getSBoxValue(tempa[3]); } tempa[0] = tempa[0] ^ Rcon[i/Nk]; } #if defined(AES256) && (AES256 == 1) if (i % Nk == 4) { // Function Subword() { tempa[0] = getSBoxValue(tempa[0]); tempa[1] = getSBoxValue(tempa[1]); tempa[2] = getSBoxValue(tempa[2]); tempa[3] = getSBoxValue(tempa[3]); } } #endif j = i * 4; k=(i - Nk) * 4; RoundKey[j + 0] = RoundKey[k + 0] ^ tempa[0]; RoundKey[j + 1] = RoundKey[k + 1] ^ tempa[1]; RoundKey[j + 2] = RoundKey[k + 2] ^ tempa[2]; RoundKey[j + 3] = RoundKey[k + 3] ^ tempa[3]; } } #if (defined(CTR) && (CTR == 1)) void AES_CTR_iv(struct AES_ctx* ctx, const uint8_t* key, const uint8_t* iv) { KeyExpansion(ctx->RoundKey, key); memcpy (ctx->Iv, iv, AES_BLOCKLEN); } #endif // This function adds the round key to state. // The round key is added to the state by an XOR function. __device__ void AddRoundKey(uint8_t round, state_t* myState, const uint8_t* RoundKey) { uint8_t i,j; //state_t *devState = (state_t*)cipher; for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { //(cipher)[i*4+j] ^= RoundKey[(round * Nb_d * 4) + (i * Nb_d) + j]; (*myState)[i][j] ^= RoundKey[(round * Nb_d * 4) + (i * Nb_d) + j]; //(cipher)[i*4+j] = 'c'; } } } __device__ void SubBytes(state_t* myState) { uint8_t i, j; for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { //(*devState)[j][i] = getSBoxValue((*devState)[j][i]); (*myState)[j][i] = sbox_d[(*myState)[j][i]]; } } } // The ShiftRows() function shifts the rows in the state to the left. // Each row is shifted with different offset. // Offset = Row number. So the first row is not shifted. 
__device__ void ShiftRows(state_t* myState) { uint8_t temp; // Rotate first row 1 columns to left temp = (*myState)[0][1]; (*myState)[0][1] = (*myState)[1][1]; (*myState)[1][1] = (*myState)[2][1]; (*myState)[2][1] = (*myState)[3][1]; (*myState)[3][1] = temp; // Rotate second row 2 columns to left temp = (*myState)[0][2]; (*myState)[0][2] = (*myState)[2][2]; (*myState)[2][2] = temp; temp = (*myState)[1][2]; (*myState)[1][2] = (*myState)[3][2]; (*myState)[3][2] = temp; // Rotate third row 3 columns to left temp = (*myState)[0][3]; (*myState)[0][3] = (*myState)[3][3]; (*myState)[3][3] = (*myState)[2][3]; (*myState)[2][3] = (*myState)[1][3]; (*myState)[1][3] = temp; } __device__ uint8_t xtime(uint8_t x) { return ((x<<1) ^ (((x>>7) & 1) * 0x1b)); } // MixColumns function mixes the columns of the state matrix __device__ void MixColumns(state_t* myState) { uint8_t i; uint8_t Tmp, Tm, t; for (i = 0; i < 4; ++i) { t = (*myState)[i][0]; Tmp = (*myState)[i][0] ^ (*myState)[i][1] ^ (*myState)[i][2] ^ (*myState)[i][3] ; Tm = (*myState)[i][0] ^ (*myState)[i][1] ; Tm = xtime(Tm); (*myState)[i][0] ^= Tm ^ Tmp ; Tm = (*myState)[i][1] ^ (*myState)[i][2] ; Tm = xtime(Tm); (*myState)[i][1] ^= Tm ^ Tmp ; Tm = (*myState)[i][2] ^ (*myState)[i][3] ; Tm = xtime(Tm); (*myState)[i][2] ^= Tm ^ Tmp ; Tm = (*myState)[i][3] ^ t ; Tm = xtime(Tm); (*myState)[i][3] ^= Tm ^ Tmp ; } } // GPUCipher is the main function that encrypts the PlainText. __global__ void GPUCipher(state_t* devState, const uint8_t* RoundKey, uint8_t* plain_text_d, state_t* myIv, int count) // HT { int id = threadIdx.x; uint8_t round = 0; unsigned i; //uint8_t *myIv= (uint8_t *) (devState); // HT //state_t *myState = (state_t *) (myIv); // HT // for(int x = 0; x < 4; x++){ // for(int y = 0; y < 4; y++){ // (*myState)[x][y] = (*devState)[x][y]; // HT // //printf("My state: %d " ,myState[x][y]); // } // } AddRoundKey(0, devState, RoundKey); //devState -> myState for (round = 1; ; ++round) { SubBytes(devState); // getSBoxValue !!! 
// HT ShiftRows(devState); // HT if (round == Nr_d) { break; } MixColumns(devState); // HT AddRoundKey(round, devState, RoundKey); // HT } //Add round key to last round AddRoundKey(Nr_d, devState, RoundKey); // HT for(int a=0; a < id; ++a){ for ( i = 0 ; i < 16; ++i) { /* inc will overflow */ if (((uint8_t *)myIv)[i] == 255) { ((uint8_t *)myIv)[i] = 0; continue; } //printf("%d", (int*)myIv[i]); ((uint8_t *)myIv)[i] += 1; break; } plain_text_d[(id * 16) + i] = plain_text_d[(id * 16) + i] ^ ((uint8_t *)devState)[i]; // //printf("Plaint text: %s ",(char*)plain_text_d); } } static void AddRoundKeyCPU(uint8_t round, state_t* state, const uint8_t* RoundKey) { uint8_t i,j; for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { (*state)[i][j] ^= RoundKey[(round * Nb * 4) + (i * Nb) + j]; } } } static void SubBytesCPU(state_t* state) { uint8_t i, j; for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) { // (*state)[j][i] = getSBoxValue((*state)[j][i]); (*state)[j][i] = sbox[((*state)[j][i])]; } } } static void ShiftRowsCPU(state_t* state) { uint8_t temp; // Rotate first row 1 columns to left temp = (*state)[0][1]; (*state)[0][1] = (*state)[1][1]; (*state)[1][1] = (*state)[2][1]; (*state)[2][1] = (*state)[3][1]; (*state)[3][1] = temp; // Rotate second row 2 columns to left temp = (*state)[0][2]; (*state)[0][2] = (*state)[2][2]; (*state)[2][2] = temp; temp = (*state)[1][2]; (*state)[1][2] = (*state)[3][2]; (*state)[3][2] = temp; // Rotate third row 3 columns to left temp = (*state)[0][3]; (*state)[0][3] = (*state)[3][3]; (*state)[3][3] = (*state)[2][3]; (*state)[2][3] = (*state)[1][3]; (*state)[1][3] = temp; } static uint8_t xtimeCPU(uint8_t x) { return ((x<<1) ^ (((x>>7) & 1) * 0x1b)); } static void MixColumnsCPU(state_t* state) { uint8_t i; uint8_t Tmp, Tm, t; for (i = 0; i < 4; ++i) { t = (*state)[i][0]; Tmp = (*state)[i][0] ^ (*state)[i][1] ^ (*state)[i][2] ^ (*state)[i][3] ; Tm = (*state)[i][0] ^ (*state)[i][1] ; Tm = xtimeCPU(Tm); (*state)[i][0] ^= Tm ^ Tmp ; Tm = 
(*state)[i][1] ^ (*state)[i][2] ; Tm = xtimeCPU(Tm); (*state)[i][1] ^= Tm ^ Tmp ; Tm = (*state)[i][2] ^ (*state)[i][3] ; Tm = xtimeCPU(Tm); (*state)[i][2] ^= Tm ^ Tmp ; Tm = (*state)[i][3] ^ t ; Tm = xtimeCPU(Tm); (*state)[i][3] ^= Tm ^ Tmp ; } } static void CPUCipher(state_t* state, const uint8_t* RoundKey) { uint8_t round = 0; // Add the First round key to the state before starting the rounds. AddRoundKeyCPU(0, state, RoundKey); // There will be Nr rounds. // The first Nr-1 rounds are identical. // These Nr rounds are executed in the loop below. // Last one without MixColumns() for (round = 1; ; ++round) { SubBytesCPU(state); ShiftRowsCPU(state); if (round == Nr) { break; } MixColumnsCPU(state); AddRoundKeyCPU(round, state, RoundKey); } // Add round key to last round AddRoundKeyCPU(Nr, state, RoundKey); } #if defined(CTR) && (CTR == 1) /* Symmetrical operation: same function for encrypting as for decrypting. */ void AES_CTR_encryption(struct AES_ctx* ctx, uint8_t* buf, uint32_t length, int block_count , int count) { uint8_t buffer[AES_BLOCKLEN]; state_t *devState = NULL; uint8_t *roundKey_d = NULL; uint8_t *plain_text_d = NULL; state_t *myIv = NULL; // HT uint8_t *plain_text_h = NULL; uint8_t *buffer2; //uint8_t *buffer_d = NULL; unsigned i; int bi; if(count > 524288){ //GPU Encryption for (i = 0, bi = AES_BLOCKLEN; i < length; ++i, ++bi) { if (bi == AES_BLOCKLEN) /* we need to regen xor compliment in buffer */ { //printf("\nENC: %s",(char*) buf); memcpy(buffer, ctx->Iv, AES_BLOCKLEN); //printf("\nbuffer: %s",(char*) buffer); cudaSetDevice(1); // cudaDevAssist(cudaMemcpyToSymbol(Nk_d, &Nk, sizeof(int), 0, cudaMemcpyHostToDevice), 535, true); // cudaDevAssist(cudaMemcpyToSymbol(Nr_d, &Nr, sizeof(int), 0, cudaMemcpyHostToDevice), 543, true); // cudaDevAssist(cudaMemcpyToSymbol(Nb_d, &Nb, sizeof(int), 0, cudaMemcpyHostToDevice), 903, true); // cudaDevAssist(cudaMemcpyToSymbol(sbox_d, &sbox, 256*sizeof(uint8_t), 0, cudaMemcpyHostToDevice), 920, true); // 
cudaDevAssist(cudaMemcpyToSymbol(rsbox_d, &rsbox, 256*sizeof(uint8_t), 0, cudaMemcpyHostToDevice), 921, true); // cudaDevAssist(cudaMemcpyToSymbol(Rcon_d, &Rcon, 11*sizeof(uint8_t), 0, cudaMemcpyHostToDevice), 922, true); //cudaThreadSynchronize(); cudaDevAssist(cudaMalloc((void**)&devState, 16*sizeof(uint8_t)), 452, true); cudaDevAssist(cudaMalloc((void**)&roundKey_d, 240*sizeof(uint8_t)), 453, true); cudaDevAssist(cudaMalloc((void**)&plain_text_d, count*sizeof(uint8_t)),446, true); cudaDevAssist(cudaMalloc((void**)&myIv, 16*sizeof(uint8_t)),447, true); // Time starting cudaDevAssist(cudaMemcpy(devState, &buffer, 16*sizeof(uint8_t), cudaMemcpyHostToDevice), 455, true); cudaDevAssist(cudaMemcpy(myIv, &buffer, 16*sizeof(uint8_t), cudaMemcpyHostToDevice), 455, true); cudaDevAssist(cudaMemcpy(roundKey_d, ctx->RoundKey, 240*sizeof(uint8_t), cudaMemcpyHostToDevice), 456, true); cudaDevAssist(cudaMemcpy(plain_text_d, buf, count*sizeof(uint8_t), cudaMemcpyHostToDevice), 457, true); //printf("\nENC2: %s",(char*)roundKey_d); //cudaDevAssist(cudaMemcpy(buffer_d, buffer, textLength*sizeof(uint8_t), cudaMemcpyHostToDevice), 457, true); cudaDevAssist(cudaDeviceSynchronize(), 268, true); //cudaCipher<<<1,1>>>(devState,roundKey_d,buffer_d); GPUCipher<<<1,block_count>>>(devState,roundKey_d, plain_text_d,myIv,count); buffer2 = (uint8_t*)malloc(16*sizeof(uint8_t)); plain_text_h = (uint8_t*)malloc(count*sizeof(uint8_t)); cudaDevAssist(cudaMemcpy(buffer2, devState, 16*sizeof(uint8_t), cudaMemcpyDeviceToHost), 462, true); //cudaDevAssist(cudaMemcpy(plain_text_h, plain_text_d, count*sizeof(uint8_t), cudaMemcpyDeviceToHost), 463, true); //Time ending /* Increment Iv and handle overflow */ // for (bi = (AES_BLOCKLEN - 1); bi >= 0; --bi) //Önce hesapla sonra gönder. 
// { // /* inc will overflow */ // if (ctx->Iv[bi] == 255) // { // ctx->Iv[bi] = 0; // continue; // } // ctx->Iv[bi] += 1; // break; // } // bi = 0; } //buf[i] = (buf[i] ^ buffer2[bi]); } cudaFree(devState); cudaFree(roundKey_d); cudaFree(plain_text_d); cudaFree(myIv); memcpy(buf, plain_text_h,count); } else{ //CPU Encryption for (i = 0, bi = AES_BLOCKLEN; i < length; ++i, ++bi) { if (bi == AES_BLOCKLEN) /* we need to regen xor compliment in buffer */ { memcpy(buffer, ctx->Iv, AES_BLOCKLEN); CPUCipher((state_t*)buffer,ctx->RoundKey); /* Increment Iv and handle overflow */ for (bi = (AES_BLOCKLEN - 1); bi >= 0; --bi) { /* inc will overflow */ if (ctx->Iv[bi] == 255) { ctx->Iv[bi] = 0; continue; } ctx->Iv[bi] += 1; // 12A3 + 1 12A4 + 1 break; } bi = 0; } buf[i] = (buf[i] ^ buffer[bi]); // buf = plain text, buffer = Iv } } } #endif // #if defined(CTR) && (CTR == 1) int main(int argc, const char * argv[]) { struct AES_ctx ctx; clock_t c_start, c_stop; uint8_t iv[16] = { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }; FILE *infile, *outfile, *keyfile; printf("Enter the path of input file"); scanf("%s", argv[0]); printf("Enter the path of 32-bit key file"); scanf("%s", argv[2]); // if(size % 16 != 0){ // int addition_part = 16 - (size % 16); // in = (uint8_t*)realloc(in,5*sizeof(int)); // for (int i = size; i <= size + addition_part; ++i) // in[i+1] = 0; // } infile = fopen(argv[0], "r"); fseek(infile, 0, SEEK_END); int size = ftell(infile); fseek(infile, 0, SEEK_SET); uint8_t in[size]; int count = 0; for(int i = 0; i<size; ++i){ fread(&in, sizeof(char), size, infile); count++; } fclose(infile); printf("\nData read from file: %s\n", in); keyfile = fopen(argv[2], "r"); uint8_t key[32]; fread(&key, sizeof(char), 32, keyfile); fclose(keyfile); printf("\nKeyfile: %s \n", key); int block_count = (count / 16) + (count % 16 == 0 ? 0 : 1); // 16 - count%16 kadar 0 eklenicek. 
printf("File size: %d bytes\n", size); outfile = fopen("/home/emre/cuda-workspace/AES256_CTR/src/output.txt", "w"); // /home/emre/Desktop/Test_files/1kb.txt /home/emre/cuda-workspace/AES256_CTR/src/key.txt int breaking_point = 524288; //This is the breaking point of our project. If file size is less than 512 kb, it runs on CPU, if it is larger than 512 kb, it runs on GPU. if (count >= breaking_point){ printf("GPU initiliaze\n"); printf("Data read from file: %s\n", in); AES_CTR_iv(&ctx, key, iv); c_start = clock(); printf("Elements read: %d", count); printf("\nBlock Count: %d", block_count); AES_CTR_encryption(&ctx, in, strlen((char*)in),block_count,count); printf("\nENC: %s",(char*) in); fwrite(&in, sizeof(char), count, outfile); c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000; printf("\nDone - Time taken on GPU: %f ms\n", diff); } else{ printf("CPU initiliaze\n"); //printf("keyfile: %s\n", key); printf("Elements read: %d\n", count); AES_CTR_iv(&ctx, key, iv); uint8_t Input[256]; //uint8_t in2[size]; printf("\nEnc:"); c_start = clock(); for(int i = 256, a=0; i<256+size;i+=256,a+=256){ memcpy(Input, in+a,256); AES_CTR_encryption(&ctx, Input, strlen((char*)Input),block_count,count); printf("%s\n",(char*) Input); // don't use this string as an input fwrite(&Input, sizeof(char), 256, outfile); } c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000; printf("\nDone - Time taken on CPU: %f ms\n", diff); } fclose(outfile); return 0; }
2f24d85add9c34dea78065a65659448393d04b38.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> #include <base.h> #include <image.h> #include <bayer.h> #include <util.h> #include "cuda_utils.h" img *read_image_from_file(char *filename){ img *result = NULL; FILE *fh = fopen(filename,"r"); if (!fh) { FatalSysError("Could not open '%s'", filename); } char buf[1024]; if (fscanf(fh,"%1024s",buf) < 1) { FatalError("'%s' is not a valid PPM file", filename); } if (strcmp(buf,"P6") != 0) { FatalError("'%s' is not a PPM file", filename); } /* Read comments */ int res = fscanf(fh," #%1024[^\n\r]",buf); if (res < 0) { FatalError("Error reading PPM file '%s'",filename); } int height, width, depth; height = width = depth = -1; /* width, height, depth followed by exactly 1 whitespace character */ res = fscanf(fh,"%d %d %d%1[\n\t ]",&width,&height,&depth,buf); if (res < 0) { FatalSysError("Error reading width, height, depth from PPM file '%s'", filename); } if (res < 3) { FatalError("Could not read width, height, depth from PPM file '%s'", filename); } if (depth <= 0 || depth > 255) { FatalError("Error reading PPM file '%s', invalid color depth",filename); } //uint pix_width = (depth <= 255) ? 
1 : 2; if (depth > 255) { FatalError("Colour depth %d not supported", depth); } Info("Loading PPM image %dx%d depth %d",width,height,depth); int buf_size = height*width*RGB; uchar *buffer = mallocz<uchar>(buf_size); res = fread(buffer,sizeof(uchar),buf_size,fh); if (res < 0) { FatalSysError("Error reading PPM file '%s'",filename); } if (res < buf_size) { FatalError("Error reading PPM file '%s', file too short, read %d bytes, should be %d", filename, res, buf_size); } result = (img *)malloc(sizeof(img)); result->width = width; result->height = height; result->buffer = buffer; return result; } texture<pixel, 2, hipReadModeElementType> tex; KERNEL void bilinear_kernel(pixel* g_odata, int width, int height); KERNEL void nop_kernel(pixel* g_result, int width, int height); float cuda_bilinear(img *image) { Info("Performing CUDA bilinear interpolation"); uint height = image->height; uint width = image->width; uint buf_size = width * height * RGB * sizeof(pixel); /* Setup raw image array */ hipChannelFormatDesc input_channel = hipCreateChannelDesc<pixel>(); hipArray *d_raw_image = NULL; cutilSafeCall(hipMallocArray(&d_raw_image, &input_channel, width * RGB, height)); assert(d_raw_image); cutilSafeCall(hipMemcpyToArray(d_raw_image, 0, 0, image->buffer, buf_size, hipMemcpyHostToDevice)); /* Setup texture */ cutilSafeCall(hipBindTextureToArray(tex, d_raw_image)); tex.normalized = 0; tex.addressMode[0] = hipAddressModeClamp; tex.addressMode[1] = hipAddressModeClamp; tex.filterMode = hipFilterModePoint; //hipError_t hipTexRefSetAddressMode (textureReference hTexRef, int Dim, hipAddress_mode am) //hipTexRefSetAddressMode(tex,0,HIP_TR_ADDRESS_MODE_MIRROR); //hipTexRefSetAddressMode(tex,1,HIP_TR_ADDRESS_MODE_MIRROR); //hipTexRefSetAddressMode(tex,2,HIP_TR_ADDRESS_MODE_MIRROR); /* Setup output */ pixel *d_result = NULL; cutilSafeCall(hipMalloc((void **)&d_result, buf_size)); dim3 thread_block(8, 8); dim3 block_grid((width + thread_block.x - 1) / thread_block.x, (height + 
thread_block.y - 1) / thread_block.y); //dim3 thread_block(1,1); //dim3 block_grid(width, height); #ifdef _BENCH Info("Warm up"); hipLaunchKernelGGL(( nop_kernel), dim3(block_grid), dim3(thread_block), 0, 0, d_result, width, height); cutilCheckMsg("Kernel execution failed"); #endif cutilSafeCall(hipDeviceSynchronize()); unsigned int timer = 0; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutStartTimer(timer)); //Info("Running test"); hipLaunchKernelGGL(( bilinear_kernel), dim3(block_grid), dim3(thread_block), 0, 0, d_result, width, height); cutilCheckMsg("Kernel execution failed"); (hipDeviceSynchronize()); cutilCheckError(cutStopTimer(timer)); //printf("%d, %d, %f\n", image->width, image->height, cutGetTimerValue(timer)); Info("Processing time: %f (ms)", cutGetTimerValue(timer)); float time = (cutGetTimerValue(timer)); Info("%.2f Mpixels/sec", (width * height / (time/ 1000.0f) ) / 1e6); cutilCheckError( cutDeleteTimer( timer)); /* Copy result from device */ cutilSafeCall( hipMemcpy( image->buffer, d_result, buf_size, hipMemcpyDeviceToHost)); cutilSafeCall(hipFree(d_result)); cutilSafeCall(hipFreeArray(d_raw_image)); hipDeviceReset(); return time; } #define tex_get_comp(tex,x,y,c) tex2D((tex),(mirror((x),width))*3+(c),(mirror((y),height))) //#define tex_get_comp(tex,x,y,c) tex2D((tex),(x)*3+(c),(y)) KERNEL void bilinear_kernel(pixel* g_result, int width, int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int filter_color = get_filter_color(x,y); float sum = 0; int res_index = (y*width + x)*3; pixel *res_pix = g_result+res_index; /* Copy existing val to output */ res_pix[filter_color] = tex_get_comp(tex,x,y,filter_color); if (filter_color == R || filter_color == B) { /* Red/Blue only pixels */ /* Green channel */ sum = tex_get_comp(tex,x-1,y,G) + tex_get_comp(tex,x+1,y,G) + tex_get_comp(tex,x,y-1,G) + tex_get_comp(tex,x,y+1,G); res_pix[G] = (uchar)(sum / 4); 
int dest_color = (filter_color == R) ? B : R; /* Red/Blue channel */ sum = tex_get_comp(tex,x-1,y-1,dest_color) + tex_get_comp(tex,x-1,y+1,dest_color) + tex_get_comp(tex,x+1,y-1,dest_color) + tex_get_comp(tex,x+1,y+1,dest_color); res_pix[dest_color] = (uchar)(sum / 4); } else { /* Green only pixels */ /* Red channel */ if (even(y)) { sum = tex_get_comp(tex,x-1,y,R) + tex_get_comp(tex,x+1,y,R); res_pix[R] = (uchar)(sum / 2); sum = tex_get_comp(tex,x,y-1,B) + tex_get_comp(tex,x,y+1,B); res_pix[B] = (uchar)(sum / 2); } else { sum = tex_get_comp(tex,x,y-1,R) + tex_get_comp(tex,x,y+1,R); res_pix[R] = (uchar)(sum / 2); sum = tex_get_comp(tex,x-1,y,B) + tex_get_comp(tex,x+1,y,B); res_pix[B] = (uchar)(sum / 2); } } } int main(int argc, char **argv) { if (argc <= 1){ exit(0); } img* image = read_image_from_file(argv[1]); float t_tex = cuda_bilinear(image); Info("Texture time: %f",t_tex); }
2f24d85add9c34dea78065a65659448393d04b38.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> #include <base.h> #include <image.h> #include <bayer.h> #include <util.h> #include "cuda_utils.h" img *read_image_from_file(char *filename){ img *result = NULL; FILE *fh = fopen(filename,"r"); if (!fh) { FatalSysError("Could not open '%s'", filename); } char buf[1024]; if (fscanf(fh,"%1024s",buf) < 1) { FatalError("'%s' is not a valid PPM file", filename); } if (strcmp(buf,"P6") != 0) { FatalError("'%s' is not a PPM file", filename); } /* Read comments */ int res = fscanf(fh," #%1024[^\n\r]",buf); if (res < 0) { FatalError("Error reading PPM file '%s'",filename); } int height, width, depth; height = width = depth = -1; /* width, height, depth followed by exactly 1 whitespace character */ res = fscanf(fh,"%d %d %d%1[\n\t ]",&width,&height,&depth,buf); if (res < 0) { FatalSysError("Error reading width, height, depth from PPM file '%s'", filename); } if (res < 3) { FatalError("Could not read width, height, depth from PPM file '%s'", filename); } if (depth <= 0 || depth > 255) { FatalError("Error reading PPM file '%s', invalid color depth",filename); } //uint pix_width = (depth <= 255) ? 
1 : 2; if (depth > 255) { FatalError("Colour depth %d not supported", depth); } Info("Loading PPM image %dx%d depth %d",width,height,depth); int buf_size = height*width*RGB; uchar *buffer = mallocz<uchar>(buf_size); res = fread(buffer,sizeof(uchar),buf_size,fh); if (res < 0) { FatalSysError("Error reading PPM file '%s'",filename); } if (res < buf_size) { FatalError("Error reading PPM file '%s', file too short, read %d bytes, should be %d", filename, res, buf_size); } result = (img *)malloc(sizeof(img)); result->width = width; result->height = height; result->buffer = buffer; return result; } texture<pixel, 2, cudaReadModeElementType> tex; KERNEL void bilinear_kernel(pixel* g_odata, int width, int height); KERNEL void nop_kernel(pixel* g_result, int width, int height); float cuda_bilinear(img *image) { Info("Performing CUDA bilinear interpolation"); uint height = image->height; uint width = image->width; uint buf_size = width * height * RGB * sizeof(pixel); /* Setup raw image array */ cudaChannelFormatDesc input_channel = cudaCreateChannelDesc<pixel>(); cudaArray *d_raw_image = NULL; cutilSafeCall(cudaMallocArray(&d_raw_image, &input_channel, width * RGB, height)); assert(d_raw_image); cutilSafeCall(cudaMemcpyToArray(d_raw_image, 0, 0, image->buffer, buf_size, cudaMemcpyHostToDevice)); /* Setup texture */ cutilSafeCall(cudaBindTextureToArray(tex, d_raw_image)); tex.normalized = 0; tex.addressMode[0] = cudaAddressModeClamp; tex.addressMode[1] = cudaAddressModeClamp; tex.filterMode = cudaFilterModePoint; //CUresult cuTexRefSetAddressMode (CUtexref hTexRef, int Dim, CUaddress_mode am) //cuTexRefSetAddressMode(tex,0,CU_TR_ADDRESS_MODE_MIRROR); //cuTexRefSetAddressMode(tex,1,CU_TR_ADDRESS_MODE_MIRROR); //cuTexRefSetAddressMode(tex,2,CU_TR_ADDRESS_MODE_MIRROR); /* Setup output */ pixel *d_result = NULL; cutilSafeCall(cudaMalloc((void **)&d_result, buf_size)); dim3 thread_block(8, 8); dim3 block_grid((width + thread_block.x - 1) / thread_block.x, (height + thread_block.y - 
1) / thread_block.y); //dim3 thread_block(1,1); //dim3 block_grid(width, height); #ifdef _BENCH Info("Warm up"); nop_kernel<<< block_grid, thread_block>>>(d_result, width, height); cutilCheckMsg("Kernel execution failed"); #endif cutilSafeCall(cudaThreadSynchronize()); unsigned int timer = 0; cutilCheckError(cutCreateTimer(&timer)); cutilCheckError(cutStartTimer(timer)); //Info("Running test"); bilinear_kernel<<< block_grid, thread_block>>>(d_result, width, height); cutilCheckMsg("Kernel execution failed"); (cudaThreadSynchronize()); cutilCheckError(cutStopTimer(timer)); //printf("%d, %d, %f\n", image->width, image->height, cutGetTimerValue(timer)); Info("Processing time: %f (ms)", cutGetTimerValue(timer)); float time = (cutGetTimerValue(timer)); Info("%.2f Mpixels/sec", (width * height / (time/ 1000.0f) ) / 1e6); cutilCheckError( cutDeleteTimer( timer)); /* Copy result from device */ cutilSafeCall( cudaMemcpy( image->buffer, d_result, buf_size, cudaMemcpyDeviceToHost)); cutilSafeCall(cudaFree(d_result)); cutilSafeCall(cudaFreeArray(d_raw_image)); cudaThreadExit(); return time; } #define tex_get_comp(tex,x,y,c) tex2D((tex),(mirror((x),width))*3+(c),(mirror((y),height))) //#define tex_get_comp(tex,x,y,c) tex2D((tex),(x)*3+(c),(y)) KERNEL void bilinear_kernel(pixel* g_result, int width, int height) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= width || y >= height) { return; } int filter_color = get_filter_color(x,y); float sum = 0; int res_index = (y*width + x)*3; pixel *res_pix = g_result+res_index; /* Copy existing val to output */ res_pix[filter_color] = tex_get_comp(tex,x,y,filter_color); if (filter_color == R || filter_color == B) { /* Red/Blue only pixels */ /* Green channel */ sum = tex_get_comp(tex,x-1,y,G) + tex_get_comp(tex,x+1,y,G) + tex_get_comp(tex,x,y-1,G) + tex_get_comp(tex,x,y+1,G); res_pix[G] = (uchar)(sum / 4); int dest_color = (filter_color == R) ? 
B : R; /* Red/Blue channel */ sum = tex_get_comp(tex,x-1,y-1,dest_color) + tex_get_comp(tex,x-1,y+1,dest_color) + tex_get_comp(tex,x+1,y-1,dest_color) + tex_get_comp(tex,x+1,y+1,dest_color); res_pix[dest_color] = (uchar)(sum / 4); } else { /* Green only pixels */ /* Red channel */ if (even(y)) { sum = tex_get_comp(tex,x-1,y,R) + tex_get_comp(tex,x+1,y,R); res_pix[R] = (uchar)(sum / 2); sum = tex_get_comp(tex,x,y-1,B) + tex_get_comp(tex,x,y+1,B); res_pix[B] = (uchar)(sum / 2); } else { sum = tex_get_comp(tex,x,y-1,R) + tex_get_comp(tex,x,y+1,R); res_pix[R] = (uchar)(sum / 2); sum = tex_get_comp(tex,x-1,y,B) + tex_get_comp(tex,x+1,y,B); res_pix[B] = (uchar)(sum / 2); } } } int main(int argc, char **argv) { if (argc <= 1){ exit(0); } img* image = read_image_from_file(argv[1]); float t_tex = cuda_bilinear(image); Info("Texture time: %f",t_tex); }
0747794437ab736aeb2261d566dd7ed5d677501e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file CUDAAgentModel.cu * @authors Paul * @date * @brief * * @see * @warning */ #include "flamegpu/gpu/CUDAAgentModel.h" #include <iostream> #include <algorithm> #include "flamegpu/model/ModelDescription.h" #include "flamegpu/pop/AgentPopulation.h" #include "flamegpu/sim/Simulation.h" #include "flamegpu/runtime/utility/RandomManager.cuh" // include FLAMEGPU kernel wrapper #include "flamegpu/runtime/agent_function.h" /** * CUDAAgentModel class * @brief populates CUDA agent map, CUDA message map */ CUDAAgentModel::CUDAAgentModel(const ModelDescription& description) : model_description(description), agent_map(), curve(cuRVEInstance::getInstance()), message_map(), host_api(*this), rng(RandomManager::getInstance()) { // , function_map() { // create a reference to curve to ensure that it is initialised. This is a singleton class so will only be done once regardless of the number of CUDAgentModels. // populate the CUDA agent map const AgentMap &am = model_description.getAgentMap(); AgentMap::const_iterator it; // const_iterator returns a reference to a constant value (const T&) and prevents modification of the reference value // create new cuda agent and add to the map for (it = am.begin(); it != am.end(); it++) { agent_map.insert(CUDAAgentMap::value_type(it->first, std::unique_ptr<CUDAAgent>(new CUDAAgent(it->second)))); } // insert into map using value_type // populate the CUDA message map const MessageMap &mm = model_description.getMessageMap(); MessageMap::const_iterator it_m; // create new cuda message and add to the map for (it_m = mm.begin(); it_m != mm.end(); it_m++) { message_map.insert(CUDAMessageMap::value_type(it_m->first, std::unique_ptr<CUDAMessage>(new CUDAMessage(it_m->second)))); } /* // populate the CUDA function map const FunctionMap &mm = model_description.getFunctionMap(); FunctioneMap::const_iterator it; for (it = mm.begin(); it != mm.end(); it++) { 
FunctionMap.insert(CUDAFunctionMap::value_type(it->first, std::unique_ptr<CUDAAgentFunction>(new CUDAAgentFunction(it->second)))); } */ } /** * A destructor. * @brief Destroys the CUDAAgentModel object */ CUDAAgentModel::~CUDAAgentModel() { // unique pointers cleanup by automatically } /** * @brief Sets the initial population data * @param AgentPopulation object * @return none */ void CUDAAgentModel::setInitialPopulationData(AgentPopulation& population) { CUDAAgentMap::iterator it; it = agent_map.find(population.getAgentName()); if (it == agent_map.end()) { THROW InvalidCudaAgent("Error: Agent ('%s') was not found, " "in CUDAAgentModel::setInitialPopulationData()", population.getAgentName().c_str()); } /*! create agent state lists */ it->second->setInitialPopulationData(population); } /** * @brief Sets the population data * @param AgentPopulation object * @return none */ void CUDAAgentModel::setPopulationData(AgentPopulation& population) { CUDAAgentMap::iterator it; it = agent_map.find(population.getAgentName()); if (it == agent_map.end()) { THROW InvalidCudaAgent("Error: Agent ('%s') was not found, " "in CUDAAgentModel::setPopulationData()", population.getAgentName().c_str()); } /*! 
create agent state lists */ it->second->setPopulationData(population); } void CUDAAgentModel::getPopulationData(AgentPopulation& population) { CUDAAgentMap::iterator it; it = agent_map.find(population.getAgentName()); if (it == agent_map.end()) { THROW InvalidCudaAgent("Error: Agent ('%s') was not found, " "in CUDAAgentModel::getPopulationData()", population.getAgentName().c_str()); } /*!create agent state lists */ it->second->getPopulationData(population); } /** * @brief Loops through agents functions and register all variables * (variable has must also be tied to function name using the namespace thing in curve) */ bool CUDAAgentModel::step(const Simulation& simulation) { int nStreams = 1; std::string message_name; CurveNamespaceHash message_name_inp_hash = 0; CurveNamespaceHash message_name_outp_hash = 0; unsigned int messageList_Size = 0; // TODO: simulation.getMaxFunctionsPerLayer() for (unsigned int i = 0; i < simulation.getLayerCount(); i++) { int temp = static_cast<int>(simulation.getFunctionsAtLayer(i).size()); nStreams = ::max(nStreams, temp); } /*! Stream creations */ hipStream_t *stream = new hipStream_t[nStreams]; /*! Stream initialisation */ for (int j = 0; j < nStreams; j++) gpuErrchk(hipStreamCreate(&stream[j])); /*! for each each sim layer, launch each agent function in its own stream */ for (unsigned int i = 0; i < simulation.getLayerCount(); i++) { const auto& functions = simulation.getFunctionsAtLayer(i); int j = 0; // Sum the total number of threads being launched in the layer unsigned int totalThreads = 0; /*! 
for each func function - Loop through to do all mapping of agent and message variables */ for (AgentFunctionDescription func_des : functions) { const CUDAAgent& cuda_agent = getCUDAAgent(func_des.getParent().getName()); // check if a function has an input massage if (func_des.hasInputMessage()) { std::string inpMessage_name = func_des.getInputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(inpMessage_name); printf("inp msg name: %s\n", inpMessage_name.c_str()); cuda_message.mapRuntimeVariables(func_des); } // check if a function has an output massage if (func_des.hasOutputMessage()) { std::string outpMessage_name = func_des.getOutputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(outpMessage_name); printf("inp msg name: %s\n", outpMessage_name.c_str()); cuda_message.mapRuntimeVariables(func_des); } /** * Configure runtime access of the functions variables within the FLAME_API object */ cuda_agent.mapRuntimeVariables(func_des); // Count total threads being launched totalThreads += cuda_agent.getMaximumListSize(); } // Ensure RandomManager is the correct size to accomodate all threads to be launched rng.resize(totalThreads); // Total threads is now used to provide kernel launches an offset to thread-safe thread-index totalThreads = 0; //! 
for each func function - Loop through to launch all agent functions for (AgentFunctionDescription func_des : functions) { std::string agent_name = func_des.getParent().getName(); std::string func_name = func_des.getName(); // check if a function has an output massage if (func_des.hasInputMessage()) { std::string inpMessage_name = func_des.getInputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(inpMessage_name); // message_name = inpMessage_name; // hash message name message_name_inp_hash = curveVariableRuntimeHash(inpMessage_name.c_str()); messageList_Size = cuda_message.getMaximumListSize(); } // check if a function has an output massage if (func_des.hasOutputMessage()) { std::string outpMessage_name = func_des.getOutputMessageName(); // const CUDAMessage& cuda_message = getCUDAMessage(outpMessage_name); // message_name = outpMessage_name; // hash message name message_name_outp_hash = curveVariableRuntimeHash(outpMessage_name.c_str()); } const CUDAAgent& cuda_agent = getCUDAAgent(agent_name); // get the agent function FLAMEGPU_AGENT_FUNCTION_POINTER* agent_func = func_des.getFunction(); // host_pointer FLAMEGPU_AGENT_FUNCTION_POINTER h_func_ptr; // hipMemcpyFromSymbolAsync(&h_func_ptr, *agent_func, sizeof(FLAMEGPU_AGENT_FUNCTION_POINTER),0,hipMemcpyDeviceToHost,stream[j]); hipMemcpyFromSymbolAsync(&h_func_ptr, *agent_func, sizeof(FLAMEGPU_AGENT_FUNCTION_POINTER)); int state_list_size = cuda_agent.getMaximumListSize(); int blockSize = 0; // The launch configurator returned block size int minGridSize = 0; // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch int gridSize = 0; // The actual grid size needed, based on input size // calculate the grid block size for main agent function hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, agent_function_wrapper, 0, state_list_size); //! 
Round up according to CUDAAgent state list size gridSize = (state_list_size + blockSize - 1) / blockSize; // hash agent name CurveNamespaceHash agentname_hash = curveVariableRuntimeHash(agent_name.c_str()); // hash function name CurveNamespaceHash funcname_hash = curveVariableRuntimeHash(func_name.c_str()); // agent_function_wrapper << <gridSize, blockSize, 0, stream[j] >> > (agentname_hash + funcname_hash, h_func_ptr, state_list_size); hipLaunchKernelGGL(( agent_function_wrapper) , dim3(gridSize), dim3(blockSize), 0, stream[j] , agentname_hash + funcname_hash, message_name_inp_hash, message_name_outp_hash, h_func_ptr, state_list_size, messageList_Size, totalThreads); totalThreads += state_list_size; ++j; } // for each func function - Loop through to un-map all agent and message variables for (AgentFunctionDescription func_des : functions) { const CUDAAgent& cuda_agent = getCUDAAgent(func_des.getParent().getName()); // check if a function has an output massage if (func_des.hasInputMessage()) { std::string inpMessage_name = func_des.getInputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(inpMessage_name); cuda_message.unmapRuntimeVariables(func_des); } // check if a function has an output massage if (func_des.hasOutputMessage()) { std::string outpMessage_name = func_des.getOutputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(outpMessage_name); cuda_message.unmapRuntimeVariables(func_des); } // const CUDAMessage& cuda_inpMessage = getCUDAMessage(func_des.getInputChild.getMessageName()); // const CUDAMessage& cuda_outpMessage = getCUDAMessage(func_des.getOutputChild.getMessageName()); // unmap the function variables cuda_agent.unmapRuntimeVariables(func_des); } // Execute all host functions attached to layer // TODO: Concurrency? 
for (auto &stepFn : simulation.getHostFunctionsAtLayer(i)) stepFn(&this->host_api); // hipDeviceSynchronize(); } // stream deletion for (int j = 0; j < nStreams; ++j) gpuErrchk(hipStreamDestroy(stream[j])); delete[] stream; // Execute step functions for (auto &stepFn : simulation.getStepFunctions()) stepFn(&this->host_api); // Execute exit conditions for (auto &exitCdns : simulation.getExitConditions()) if (exitCdns(&this->host_api) == EXIT) return false; return true; } /** * @brief initialize CUDA params (e.g: set CUDA device) * @warning not tested */ void CUDAAgentModel::init(void) { // (int argc, char** argv) { hipError_t cudaStatus; int device; int device_count; // default device device = 0; cudaStatus = hipGetDeviceCount(&device_count); if (cudaStatus != hipSuccess) { THROW InvalidCUDAdevice("Error finding CUDA devices! Do you have a CUDA-capable GPU installed?"); } if (device_count == 0) { THROW InvalidCUDAdevice("Error no CUDA devices found!"); } // Select device cudaStatus = hipSetDevice(device); if (cudaStatus != hipSuccess) { THROW InvalidCUDAdevice("Error setting CUDA device to '%d', only %d available!", device, device_count); } } /** * @brief simulates functions * @param object * @return none * @todo not yet completed * @warning not tested */ void CUDAAgentModel::simulate(const Simulation& simulation) { if (agent_map.size() == 0) { THROW InvalidCudaAgentMapSize("Simulation has no agents, in CUDAAgentModel::simulate()."); // recheck if this is really required } // CUDAAgentMap::iterator it; // check any CUDAAgents with population size == 0 // if they have executable functions then these can be ignored // if they have agent creations then buffer space must be allocated for them // Execute init functions for (auto &initFn : simulation.getInitFunctions()) initFn(&this->host_api); for (unsigned int i = 0; simulation.getSimulationSteps() == 0 ? 
true : i < simulation.getSimulationSteps(); i++) { // std::cout <<"step: " << i << std::endl; if (!step(simulation)) break; } // Execute exit functions for (auto &exitFn : simulation.getExitFunctions()) exitFn(&this->host_api); } const CUDAAgent& CUDAAgentModel::getCUDAAgent(std::string agent_name) const { CUDAAgentMap::const_iterator it; it = agent_map.find(agent_name); if (it == agent_map.end()) { THROW InvalidCudaAgent("CUDA agent ('%s') not found, in CUDAAgentModel::getCUDAAgent().", agent_name.c_str()); } return *(it->second); } const CUDAMessage& CUDAAgentModel::getCUDAMessage(std::string message_name) const { CUDAMessageMap::const_iterator it; it = message_map.find(message_name); if (it == message_map.end()) { THROW InvalidCudaMessage("CUDA message ('%s') not found, in CUDAAgentModel::getCUDAMessage().", message_name.c_str()); } return *(it->second); }
0747794437ab736aeb2261d566dd7ed5d677501e.cu
/** * @file CUDAAgentModel.cu * @authors Paul * @date * @brief * * @see * @warning */ #include "flamegpu/gpu/CUDAAgentModel.h" #include <iostream> #include <algorithm> #include "flamegpu/model/ModelDescription.h" #include "flamegpu/pop/AgentPopulation.h" #include "flamegpu/sim/Simulation.h" #include "flamegpu/runtime/utility/RandomManager.cuh" // include FLAMEGPU kernel wrapper #include "flamegpu/runtime/agent_function.h" /** * CUDAAgentModel class * @brief populates CUDA agent map, CUDA message map */ CUDAAgentModel::CUDAAgentModel(const ModelDescription& description) : model_description(description), agent_map(), curve(cuRVEInstance::getInstance()), message_map(), host_api(*this), rng(RandomManager::getInstance()) { // , function_map() { // create a reference to curve to ensure that it is initialised. This is a singleton class so will only be done once regardless of the number of CUDAgentModels. // populate the CUDA agent map const AgentMap &am = model_description.getAgentMap(); AgentMap::const_iterator it; // const_iterator returns a reference to a constant value (const T&) and prevents modification of the reference value // create new cuda agent and add to the map for (it = am.begin(); it != am.end(); it++) { agent_map.insert(CUDAAgentMap::value_type(it->first, std::unique_ptr<CUDAAgent>(new CUDAAgent(it->second)))); } // insert into map using value_type // populate the CUDA message map const MessageMap &mm = model_description.getMessageMap(); MessageMap::const_iterator it_m; // create new cuda message and add to the map for (it_m = mm.begin(); it_m != mm.end(); it_m++) { message_map.insert(CUDAMessageMap::value_type(it_m->first, std::unique_ptr<CUDAMessage>(new CUDAMessage(it_m->second)))); } /* // populate the CUDA function map const FunctionMap &mm = model_description.getFunctionMap(); FunctioneMap::const_iterator it; for (it = mm.begin(); it != mm.end(); it++) { FunctionMap.insert(CUDAFunctionMap::value_type(it->first, std::unique_ptr<CUDAAgentFunction>(new 
CUDAAgentFunction(it->second)))); } */ } /** * A destructor. * @brief Destroys the CUDAAgentModel object */ CUDAAgentModel::~CUDAAgentModel() { // unique pointers cleanup by automatically } /** * @brief Sets the initial population data * @param AgentPopulation object * @return none */ void CUDAAgentModel::setInitialPopulationData(AgentPopulation& population) { CUDAAgentMap::iterator it; it = agent_map.find(population.getAgentName()); if (it == agent_map.end()) { THROW InvalidCudaAgent("Error: Agent ('%s') was not found, " "in CUDAAgentModel::setInitialPopulationData()", population.getAgentName().c_str()); } /*! create agent state lists */ it->second->setInitialPopulationData(population); } /** * @brief Sets the population data * @param AgentPopulation object * @return none */ void CUDAAgentModel::setPopulationData(AgentPopulation& population) { CUDAAgentMap::iterator it; it = agent_map.find(population.getAgentName()); if (it == agent_map.end()) { THROW InvalidCudaAgent("Error: Agent ('%s') was not found, " "in CUDAAgentModel::setPopulationData()", population.getAgentName().c_str()); } /*! 
create agent state lists */ it->second->setPopulationData(population); } void CUDAAgentModel::getPopulationData(AgentPopulation& population) { CUDAAgentMap::iterator it; it = agent_map.find(population.getAgentName()); if (it == agent_map.end()) { THROW InvalidCudaAgent("Error: Agent ('%s') was not found, " "in CUDAAgentModel::getPopulationData()", population.getAgentName().c_str()); } /*!create agent state lists */ it->second->getPopulationData(population); } /** * @brief Loops through agents functions and register all variables * (variable has must also be tied to function name using the namespace thing in curve) */ bool CUDAAgentModel::step(const Simulation& simulation) { int nStreams = 1; std::string message_name; CurveNamespaceHash message_name_inp_hash = 0; CurveNamespaceHash message_name_outp_hash = 0; unsigned int messageList_Size = 0; // TODO: simulation.getMaxFunctionsPerLayer() for (unsigned int i = 0; i < simulation.getLayerCount(); i++) { int temp = static_cast<int>(simulation.getFunctionsAtLayer(i).size()); nStreams = std::max(nStreams, temp); } /*! Stream creations */ cudaStream_t *stream = new cudaStream_t[nStreams]; /*! Stream initialisation */ for (int j = 0; j < nStreams; j++) gpuErrchk(cudaStreamCreate(&stream[j])); /*! for each each sim layer, launch each agent function in its own stream */ for (unsigned int i = 0; i < simulation.getLayerCount(); i++) { const auto& functions = simulation.getFunctionsAtLayer(i); int j = 0; // Sum the total number of threads being launched in the layer unsigned int totalThreads = 0; /*! 
for each func function - Loop through to do all mapping of agent and message variables */ for (AgentFunctionDescription func_des : functions) { const CUDAAgent& cuda_agent = getCUDAAgent(func_des.getParent().getName()); // check if a function has an input massage if (func_des.hasInputMessage()) { std::string inpMessage_name = func_des.getInputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(inpMessage_name); printf("inp msg name: %s\n", inpMessage_name.c_str()); cuda_message.mapRuntimeVariables(func_des); } // check if a function has an output massage if (func_des.hasOutputMessage()) { std::string outpMessage_name = func_des.getOutputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(outpMessage_name); printf("inp msg name: %s\n", outpMessage_name.c_str()); cuda_message.mapRuntimeVariables(func_des); } /** * Configure runtime access of the functions variables within the FLAME_API object */ cuda_agent.mapRuntimeVariables(func_des); // Count total threads being launched totalThreads += cuda_agent.getMaximumListSize(); } // Ensure RandomManager is the correct size to accomodate all threads to be launched rng.resize(totalThreads); // Total threads is now used to provide kernel launches an offset to thread-safe thread-index totalThreads = 0; //! 
for each func function - Loop through to launch all agent functions for (AgentFunctionDescription func_des : functions) { std::string agent_name = func_des.getParent().getName(); std::string func_name = func_des.getName(); // check if a function has an output massage if (func_des.hasInputMessage()) { std::string inpMessage_name = func_des.getInputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(inpMessage_name); // message_name = inpMessage_name; // hash message name message_name_inp_hash = curveVariableRuntimeHash(inpMessage_name.c_str()); messageList_Size = cuda_message.getMaximumListSize(); } // check if a function has an output massage if (func_des.hasOutputMessage()) { std::string outpMessage_name = func_des.getOutputMessageName(); // const CUDAMessage& cuda_message = getCUDAMessage(outpMessage_name); // message_name = outpMessage_name; // hash message name message_name_outp_hash = curveVariableRuntimeHash(outpMessage_name.c_str()); } const CUDAAgent& cuda_agent = getCUDAAgent(agent_name); // get the agent function FLAMEGPU_AGENT_FUNCTION_POINTER* agent_func = func_des.getFunction(); // host_pointer FLAMEGPU_AGENT_FUNCTION_POINTER h_func_ptr; // cudaMemcpyFromSymbolAsync(&h_func_ptr, *agent_func, sizeof(FLAMEGPU_AGENT_FUNCTION_POINTER),0,cudaMemcpyDeviceToHost,stream[j]); cudaMemcpyFromSymbolAsync(&h_func_ptr, *agent_func, sizeof(FLAMEGPU_AGENT_FUNCTION_POINTER)); int state_list_size = cuda_agent.getMaximumListSize(); int blockSize = 0; // The launch configurator returned block size int minGridSize = 0; // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch int gridSize = 0; // The actual grid size needed, based on input size // calculate the grid block size for main agent function cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, agent_function_wrapper, 0, state_list_size); //! 
Round up according to CUDAAgent state list size gridSize = (state_list_size + blockSize - 1) / blockSize; // hash agent name CurveNamespaceHash agentname_hash = curveVariableRuntimeHash(agent_name.c_str()); // hash function name CurveNamespaceHash funcname_hash = curveVariableRuntimeHash(func_name.c_str()); // agent_function_wrapper << <gridSize, blockSize, 0, stream[j] >> > (agentname_hash + funcname_hash, h_func_ptr, state_list_size); agent_function_wrapper <<<gridSize, blockSize, 0, stream[j] >>>(agentname_hash + funcname_hash, message_name_inp_hash, message_name_outp_hash, h_func_ptr, state_list_size, messageList_Size, totalThreads); totalThreads += state_list_size; ++j; } // for each func function - Loop through to un-map all agent and message variables for (AgentFunctionDescription func_des : functions) { const CUDAAgent& cuda_agent = getCUDAAgent(func_des.getParent().getName()); // check if a function has an output massage if (func_des.hasInputMessage()) { std::string inpMessage_name = func_des.getInputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(inpMessage_name); cuda_message.unmapRuntimeVariables(func_des); } // check if a function has an output massage if (func_des.hasOutputMessage()) { std::string outpMessage_name = func_des.getOutputMessageName(); const CUDAMessage& cuda_message = getCUDAMessage(outpMessage_name); cuda_message.unmapRuntimeVariables(func_des); } // const CUDAMessage& cuda_inpMessage = getCUDAMessage(func_des.getInputChild.getMessageName()); // const CUDAMessage& cuda_outpMessage = getCUDAMessage(func_des.getOutputChild.getMessageName()); // unmap the function variables cuda_agent.unmapRuntimeVariables(func_des); } // Execute all host functions attached to layer // TODO: Concurrency? 
for (auto &stepFn : simulation.getHostFunctionsAtLayer(i)) stepFn(&this->host_api); // cudaDeviceSynchronize(); } // stream deletion for (int j = 0; j < nStreams; ++j) gpuErrchk(cudaStreamDestroy(stream[j])); delete[] stream; // Execute step functions for (auto &stepFn : simulation.getStepFunctions()) stepFn(&this->host_api); // Execute exit conditions for (auto &exitCdns : simulation.getExitConditions()) if (exitCdns(&this->host_api) == EXIT) return false; return true; } /** * @brief initialize CUDA params (e.g: set CUDA device) * @warning not tested */ void CUDAAgentModel::init(void) { // (int argc, char** argv) { cudaError_t cudaStatus; int device; int device_count; // default device device = 0; cudaStatus = cudaGetDeviceCount(&device_count); if (cudaStatus != cudaSuccess) { THROW InvalidCUDAdevice("Error finding CUDA devices! Do you have a CUDA-capable GPU installed?"); } if (device_count == 0) { THROW InvalidCUDAdevice("Error no CUDA devices found!"); } // Select device cudaStatus = cudaSetDevice(device); if (cudaStatus != cudaSuccess) { THROW InvalidCUDAdevice("Error setting CUDA device to '%d', only %d available!", device, device_count); } } /** * @brief simulates functions * @param object * @return none * @todo not yet completed * @warning not tested */ void CUDAAgentModel::simulate(const Simulation& simulation) { if (agent_map.size() == 0) { THROW InvalidCudaAgentMapSize("Simulation has no agents, in CUDAAgentModel::simulate()."); // recheck if this is really required } // CUDAAgentMap::iterator it; // check any CUDAAgents with population size == 0 // if they have executable functions then these can be ignored // if they have agent creations then buffer space must be allocated for them // Execute init functions for (auto &initFn : simulation.getInitFunctions()) initFn(&this->host_api); for (unsigned int i = 0; simulation.getSimulationSteps() == 0 ? 
true : i < simulation.getSimulationSteps(); i++) { // std::cout <<"step: " << i << std::endl; if (!step(simulation)) break; } // Execute exit functions for (auto &exitFn : simulation.getExitFunctions()) exitFn(&this->host_api); } const CUDAAgent& CUDAAgentModel::getCUDAAgent(std::string agent_name) const { CUDAAgentMap::const_iterator it; it = agent_map.find(agent_name); if (it == agent_map.end()) { THROW InvalidCudaAgent("CUDA agent ('%s') not found, in CUDAAgentModel::getCUDAAgent().", agent_name.c_str()); } return *(it->second); } const CUDAMessage& CUDAAgentModel::getCUDAMessage(std::string message_name) const { CUDAMessageMap::const_iterator it; it = message_map.find(message_name); if (it == message_map.end()) { THROW InvalidCudaMessage("CUDA message ('%s') not found, in CUDAAgentModel::getCUDAMessage().", message_name.c_str()); } return *(it->second); }
8c69fb002e456de45d20a8dc441d8d24169ffedc.hip
// !!! This is a file automatically generated by hipify!!!
//
// Bellman-Ford single-source shortest paths on the GPU (HIP port of the CUDA
// original). Two kernels: a naive one-thread-per-node baseline, and a
// "warp"-cooperative version where groups of WARP_SIZE threads share a chunk
// of CHUNK_SIZE nodes staged in shared memory.
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>

#include "CycleTimer.h"

#define THREADS_PER_BLOCK 512
#define CHUNK_SIZE 8  // can be also thought as nodes per chunk
// #define WARP_SIZE 32
#define WARP_SIZE 4   // cooperative group width — a sub-warp group, NOT the hardware warp width
#define WARPS_PER_BLOCK (THREADS_PER_BLOCK / WARP_SIZE)
#define NODES_PER_BLOCK (THREADS_PER_BLOCK / WARP_SIZE * CHUNK_SIZE)

// CSR graph owned by the host code: N nodes, M edges.
// nodes[v]..nodes[v+1] indexes v's outgoing edges in edges[]/weights[];
// dists[] holds the current distance estimate per node.
extern uint N, M;
extern uint *nodes, *edges, *weights, *dists;

#define DEBUG

#ifdef DEBUG
// Print a readable message (and optionally abort) when a HIP API call fails.
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true) {
    if (code != hipSuccess) {
        fprintf(stderr, "CUDA Error: %s at %s:%d\n",
            hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#else
#define cudaCheckError(ans) ans
#endif

// BASELINE VERSION ********************************

// One thread per node v: relax every outgoing edge of v.
// NOTE(review): dists[u] is read-modify-written without an atomic, so
// concurrent relaxations of the same u can lose updates; correctness
// presumably relies on the host repeating the kernel N-1 times — confirm
// convergence still holds under lost updates.
// NOTE(review): dists[v] + weights[i] can wrap if unreachable nodes are
// initialised to UINT_MAX — verify the host-side initialisation.
__global__ void
baseline_BF_kernel(uint *nodes, uint *edges, uint *weights, uint *dists,
                   uint num_nodes) {
    uint v = blockIdx.x * blockDim.x + threadIdx.x;
    if (v >= num_nodes)
        return;
    for (uint i = nodes[v]; i < nodes[v+1]; i++) {
        uint u = edges[i];  // updating an edge from v to u
        uint new_dist = dists[v] + weights[i];
        if (new_dist < dists[u])
            dists[u] = new_dist;
    }
}

// END BASELINE VERSION ********************************

// WARP-BASED VERSION ********************************

// Cooperative strided copy: the WARP_SIZE lanes of one group copy
// array[start..end) into warp_array[0..end-start); lane `offset` handles
// elements start+offset, start+offset+WARP_SIZE, ...
__inline__ __device__ void
warp_memcpy(uint start, uint offset, uint end, uint *warp_array, uint *array) {
    for (uint i = start+offset; i < end; i += WARP_SIZE)
        warp_array[i-start] = array[i];
}

// Relax the edge range [start, end) of chunk-local node v, lanes striding by
// WARP_SIZE. atomicMin makes the global relaxation race-free, unlike the
// baseline kernel's plain store.
__inline__ __device__ void
warp_update_neighbors(uint start, uint end, uint *edges, uint *dists,
                      uint *warp_dists, uint *weights, uint v) {
    for (uint i = start; i < end; i += WARP_SIZE) {
        uint u = edges[i];  // updating an edge from v to u
        uint new_dist = warp_dists[v] + weights[i];
        atomicMin(&(dists[u]), new_dist);
    }
}

// One WARP_SIZE-lane group per CHUNK_SIZE consecutive nodes: stage the chunk's
// CSR offsets and distances in shared memory, then relax each node's edges
// cooperatively.
// NOTE(review): there is no barrier/__syncwarp between the warp_memcpy writes
// and the reads of warp_nodes/warp_dists below; this relies on the group's
// lanes (4 consecutive threads, so within one hardware warp) executing in
// lockstep. Under independent thread scheduling this is a data race — confirm
// the target architecture, or add an explicit sync.
__global__ void
warp_BF_kernel(uint *nodes, uint *edges, uint *weights, uint *dists,
               uint num_nodes) {
    uint warp_offset = threadIdx.x % WARP_SIZE;  // lane index within the group
    uint warp_id = threadIdx.x / WARP_SIZE;      // group index within the block

    // this is the range of indexes of nodes for which this warp is responsible
    uint chunkStart = blockIdx.x * NODES_PER_BLOCK + warp_id * CHUNK_SIZE;
    if (chunkStart >= num_nodes)
        return;
    uint chunkEnd = chunkStart + CHUNK_SIZE;
    if (chunkEnd > num_nodes)
        chunkEnd = num_nodes;  // clamp the last (partial) chunk

    // shared memory across threads in a block
    // (+WARPS_PER_BLOCK because each group stores CHUNK_SIZE+1 CSR offsets)
    __shared__ uint block_nodes[NODES_PER_BLOCK + WARPS_PER_BLOCK];
    __shared__ uint block_dists[NODES_PER_BLOCK];
    // pointers to the start of the region corresponding to this warp
    uint *warp_nodes = block_nodes + warp_id * (CHUNK_SIZE+1);
    uint *warp_dists = block_dists + warp_id * CHUNK_SIZE;

    warp_memcpy(chunkStart, warp_offset, chunkEnd+1, warp_nodes, nodes);
    warp_memcpy(chunkStart, warp_offset, chunkEnd, warp_dists, dists);

    // iterate over my work
    for (uint v = 0; v < chunkEnd - chunkStart; v++) {
        uint nbr_start = warp_nodes[v];
        uint nbr_end = warp_nodes[v+1];
        warp_update_neighbors(nbr_start + warp_offset, nbr_end,
                              edges, dists, warp_dists, weights, v);
    }
}

// END WARP-BASED VERSION ********************************

// main function
// Runs N-1 relaxation rounds on the device; use_warp selects the
// warp-cooperative kernel over the baseline. Kernel time is reported
// separately from the end-to-end (copy + compute + copy-back) time.
void bellman_ford_cuda(bool use_warp) {
    uint *device_nodes, *device_edges, *device_weights, *device_dists;
    int blocks;
    if (!use_warp)
        blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // one thread per node
    else
        blocks = (N + NODES_PER_BLOCK - 1) / NODES_PER_BLOCK;      // one group per CHUNK_SIZE nodes

    cudaCheckError(hipMalloc(&device_nodes, (N+1) * sizeof(uint)));
    cudaCheckError(hipMalloc(&device_edges, M * sizeof(uint)));
    cudaCheckError(hipMalloc(&device_weights, M * sizeof(uint)));
    cudaCheckError(hipMalloc(&device_dists, N * sizeof(uint)));

    // start timing after allocation of device memory
    double startTime = CycleTimer::currentSeconds();

    cudaCheckError(hipMemcpy(device_nodes, nodes, (N+1) * sizeof(uint),
        hipMemcpyHostToDevice));
    cudaCheckError(hipMemcpy(device_edges, edges, M * sizeof(uint),
        hipMemcpyHostToDevice));
    cudaCheckError(hipMemcpy(device_weights, weights, M * sizeof(uint),
        hipMemcpyHostToDevice));
    cudaCheckError(hipMemcpy(device_dists, dists, N * sizeof(uint),
        hipMemcpyHostToDevice));

    hipError_t errCode = hipPeekAtLastError();
    if (errCode != hipSuccess)
        fprintf(stderr, "WARNING: A CUDA error occured before launching: code=%d, %s\n",
            errCode, hipGetErrorString(errCode));

    // run kernel
    double kernelStartTime = CycleTimer::currentSeconds();
    // N-1 rounds suffice for Bellman-Ford convergence on an N-node graph
    // (assuming no negative cycles; weights here are unsigned anyway).
    for (uint i = 0; i < N-1; i++) {
        if (!use_warp)
            hipLaunchKernelGGL(( baseline_BF_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, device_nodes, device_edges,
                device_weights, device_dists, N);
        else
            hipLaunchKernelGGL(( warp_BF_kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, device_nodes, device_edges,
                device_weights, device_dists, N);
        // synchronise each round so in-kernel errors surface at this check
        cudaCheckError ( hipDeviceSynchronize() );
    }
    double kernelEndTime = CycleTimer::currentSeconds();

    hipMemcpy(dists, device_dists, N * sizeof(uint), hipMemcpyDeviceToHost);

    // end timing after result has been copied back into host memory
    double endTime = CycleTimer::currentSeconds();

    errCode = hipPeekAtLastError();
    if (errCode != hipSuccess)
        fprintf(stderr, "WARNING: A CUDA error occured after launching: code=%d, %s\n",
            errCode, hipGetErrorString(errCode));

    double overallDuration = endTime - startTime;
    double kernelDuration = kernelEndTime - kernelStartTime;
    if (!use_warp)
        printf("CUDA Baseline\n");
    else
        printf("CUDA Warp\n");
    printf("\tOverall: %.3f ms\n", 1000.f * overallDuration);
    printf("\tKernel: %.3f ms\n", 1000.f * kernelDuration);

    hipFree(device_nodes);
    hipFree(device_edges);
    hipFree(device_weights);
    hipFree(device_dists);
}
8c69fb002e456de45d20a8dc441d8d24169ffedc.cu
// Bellman-Ford single-source shortest paths on the GPU.
// Two kernels: a naive one-thread-per-node baseline, and a "warp"-cooperative
// version where groups of WARP_SIZE threads share a chunk of CHUNK_SIZE nodes
// staged in shared memory.
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>

#include "CycleTimer.h"

#define THREADS_PER_BLOCK 512
#define CHUNK_SIZE 8  // can be also thought as nodes per chunk
// #define WARP_SIZE 32
#define WARP_SIZE 4   // cooperative group width — a sub-warp group, NOT the hardware warp width (32)
#define WARPS_PER_BLOCK (THREADS_PER_BLOCK / WARP_SIZE)
#define NODES_PER_BLOCK (THREADS_PER_BLOCK / WARP_SIZE * CHUNK_SIZE)

// CSR graph owned by the host code: N nodes, M edges.
// nodes[v]..nodes[v+1] indexes v's outgoing edges in edges[]/weights[];
// dists[] holds the current distance estimate per node.
extern uint N, M;
extern uint *nodes, *edges, *weights, *dists;

#define DEBUG

#ifdef DEBUG
// Print a readable message (and optionally abort) when a CUDA API call fails.
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code != cudaSuccess) {
        fprintf(stderr, "CUDA Error: %s at %s:%d\n",
            cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#else
#define cudaCheckError(ans) ans
#endif

// BASELINE VERSION ********************************

// One thread per node v: relax every outgoing edge of v.
// NOTE(review): dists[u] is read-modify-written without an atomic, so
// concurrent relaxations of the same u can lose updates; correctness
// presumably relies on the host repeating the kernel N-1 times — confirm
// convergence still holds under lost updates.
// NOTE(review): dists[v] + weights[i] can wrap if unreachable nodes are
// initialised to UINT_MAX — verify the host-side initialisation.
__global__ void
baseline_BF_kernel(uint *nodes, uint *edges, uint *weights, uint *dists,
                   uint num_nodes) {
    uint v = blockIdx.x * blockDim.x + threadIdx.x;
    if (v >= num_nodes)
        return;
    for (uint i = nodes[v]; i < nodes[v+1]; i++) {
        uint u = edges[i];  // updating an edge from v to u
        uint new_dist = dists[v] + weights[i];
        if (new_dist < dists[u])
            dists[u] = new_dist;
    }
}

// END BASELINE VERSION ********************************

// WARP-BASED VERSION ********************************

// Cooperative strided copy: the WARP_SIZE lanes of one group copy
// array[start..end) into warp_array[0..end-start); lane `offset` handles
// elements start+offset, start+offset+WARP_SIZE, ...
__inline__ __device__ void
warp_memcpy(uint start, uint offset, uint end, uint *warp_array, uint *array) {
    for (uint i = start+offset; i < end; i += WARP_SIZE)
        warp_array[i-start] = array[i];
}

// Relax the edge range [start, end) of chunk-local node v, lanes striding by
// WARP_SIZE. atomicMin makes the global relaxation race-free, unlike the
// baseline kernel's plain store.
__inline__ __device__ void
warp_update_neighbors(uint start, uint end, uint *edges, uint *dists,
                      uint *warp_dists, uint *weights, uint v) {
    for (uint i = start; i < end; i += WARP_SIZE) {
        uint u = edges[i];  // updating an edge from v to u
        uint new_dist = warp_dists[v] + weights[i];
        atomicMin(&(dists[u]), new_dist);
    }
}

// One WARP_SIZE-lane group per CHUNK_SIZE consecutive nodes: stage the chunk's
// CSR offsets and distances in shared memory, then relax each node's edges
// cooperatively.
// NOTE(review): there is no __syncwarp between the warp_memcpy writes and the
// reads of warp_nodes/warp_dists below; this relies on the group's lanes
// (4 consecutive threads, so within one hardware warp) executing in lockstep.
// Under Volta+ independent thread scheduling this is a data race — confirm the
// target architecture, or add an explicit __syncwarp with the group's mask.
__global__ void
warp_BF_kernel(uint *nodes, uint *edges, uint *weights, uint *dists,
               uint num_nodes) {
    uint warp_offset = threadIdx.x % WARP_SIZE;  // lane index within the group
    uint warp_id = threadIdx.x / WARP_SIZE;      // group index within the block

    // this is the range of indexes of nodes for which this warp is responsible
    uint chunkStart = blockIdx.x * NODES_PER_BLOCK + warp_id * CHUNK_SIZE;
    if (chunkStart >= num_nodes)
        return;
    uint chunkEnd = chunkStart + CHUNK_SIZE;
    if (chunkEnd > num_nodes)
        chunkEnd = num_nodes;  // clamp the last (partial) chunk

    // shared memory across threads in a block
    // (+WARPS_PER_BLOCK because each group stores CHUNK_SIZE+1 CSR offsets)
    __shared__ uint block_nodes[NODES_PER_BLOCK + WARPS_PER_BLOCK];
    __shared__ uint block_dists[NODES_PER_BLOCK];
    // pointers to the start of the region corresponding to this warp
    uint *warp_nodes = block_nodes + warp_id * (CHUNK_SIZE+1);
    uint *warp_dists = block_dists + warp_id * CHUNK_SIZE;

    warp_memcpy(chunkStart, warp_offset, chunkEnd+1, warp_nodes, nodes);
    warp_memcpy(chunkStart, warp_offset, chunkEnd, warp_dists, dists);

    // iterate over my work
    for (uint v = 0; v < chunkEnd - chunkStart; v++) {
        uint nbr_start = warp_nodes[v];
        uint nbr_end = warp_nodes[v+1];
        warp_update_neighbors(nbr_start + warp_offset, nbr_end,
                              edges, dists, warp_dists, weights, v);
    }
}

// END WARP-BASED VERSION ********************************

// main function
// Runs N-1 relaxation rounds on the device; use_warp selects the
// warp-cooperative kernel over the baseline. Kernel time is reported
// separately from the end-to-end (copy + compute + copy-back) time.
void bellman_ford_cuda(bool use_warp) {
    uint *device_nodes, *device_edges, *device_weights, *device_dists;
    int blocks;
    if (!use_warp)
        blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // one thread per node
    else
        blocks = (N + NODES_PER_BLOCK - 1) / NODES_PER_BLOCK;      // one group per CHUNK_SIZE nodes

    cudaCheckError(cudaMalloc(&device_nodes, (N+1) * sizeof(uint)));
    cudaCheckError(cudaMalloc(&device_edges, M * sizeof(uint)));
    cudaCheckError(cudaMalloc(&device_weights, M * sizeof(uint)));
    cudaCheckError(cudaMalloc(&device_dists, N * sizeof(uint)));

    // start timing after allocation of device memory
    double startTime = CycleTimer::currentSeconds();

    cudaCheckError(cudaMemcpy(device_nodes, nodes, (N+1) * sizeof(uint),
        cudaMemcpyHostToDevice));
    cudaCheckError(cudaMemcpy(device_edges, edges, M * sizeof(uint),
        cudaMemcpyHostToDevice));
    cudaCheckError(cudaMemcpy(device_weights, weights, M * sizeof(uint),
        cudaMemcpyHostToDevice));
    cudaCheckError(cudaMemcpy(device_dists, dists, N * sizeof(uint),
        cudaMemcpyHostToDevice));

    cudaError_t errCode = cudaPeekAtLastError();
    if (errCode != cudaSuccess)
        fprintf(stderr, "WARNING: A CUDA error occured before launching: code=%d, %s\n",
            errCode, cudaGetErrorString(errCode));

    // run kernel
    double kernelStartTime = CycleTimer::currentSeconds();
    // N-1 rounds suffice for Bellman-Ford convergence on an N-node graph
    // (assuming no negative cycles; weights here are unsigned anyway).
    for (uint i = 0; i < N-1; i++) {
        if (!use_warp)
            baseline_BF_kernel<<<blocks, THREADS_PER_BLOCK>>>(device_nodes, device_edges,
                device_weights, device_dists, N);
        else
            warp_BF_kernel<<<blocks, THREADS_PER_BLOCK>>>(device_nodes, device_edges,
                device_weights, device_dists, N);
        // synchronise each round so in-kernel errors surface at this check
        cudaCheckError ( cudaDeviceSynchronize() );
    }
    double kernelEndTime = CycleTimer::currentSeconds();

    cudaMemcpy(dists, device_dists, N * sizeof(uint), cudaMemcpyDeviceToHost);

    // end timing after result has been copied back into host memory
    double endTime = CycleTimer::currentSeconds();

    errCode = cudaPeekAtLastError();
    if (errCode != cudaSuccess)
        fprintf(stderr, "WARNING: A CUDA error occured after launching: code=%d, %s\n",
            errCode, cudaGetErrorString(errCode));

    double overallDuration = endTime - startTime;
    double kernelDuration = kernelEndTime - kernelStartTime;
    if (!use_warp)
        printf("CUDA Baseline\n");
    else
        printf("CUDA Warp\n");
    printf("\tOverall: %.3f ms\n", 1000.f * overallDuration);
    printf("\tKernel: %.3f ms\n", 1000.f * kernelDuration);

    cudaFree(device_nodes);
    cudaFree(device_edges);
    cudaFree(device_weights);
    cudaFree(device_dists);
}