hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
6d8d51aafe9ca0097d5c52079c48bcde17e357bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ void exchange(float &a, float &b){ float temp = a; a = b; b = temp; } __global__ void flip_3D(float* coords, size_t dim_z, size_t dim_y, size_t dim_x, int do_z, int do_y, int do_x){ size_t index = blockIdx.x * blockDim.x + threadIdx.x; size_t total = dim_x * dim_y * dim_z; size_t total_xy = dim_x * dim_y; size_t id_x = index % dim_x; size_t id_y = (index / dim_x) % dim_x; size_t id_z = index / (dim_x * dim_y); if(index < total){ if(do_x && id_x < (dim_x / 2)){ exchange(coords[2 * total + id_z * total_xy + id_y * dim_x + id_x], coords[2 * total + id_z * total_xy + id_y * dim_x + dim_x-1 - id_x]); __syncthreads(); } if(do_y && id_y < (dim_y / 2)){ exchange(coords[total + id_z * total_xy + id_y * dim_x + id_x], coords[total + id_z * total_xy + (dim_y-1 - id_y) * dim_x + id_x]); __syncthreads(); } if(do_z && id_z < (dim_z / 2)){ exchange(coords[id_z * total_xy + id_y * dim_x + id_x], coords[(dim_z-1 -id_z) * total_xy + id_y * dim_x + id_x]); __syncthreads(); } } }
6d8d51aafe9ca0097d5c52079c48bcde17e357bf.cu
#include "includes.h" __device__ void exchange(float &a, float &b){ float temp = a; a = b; b = temp; } __global__ void flip_3D(float* coords, size_t dim_z, size_t dim_y, size_t dim_x, int do_z, int do_y, int do_x){ size_t index = blockIdx.x * blockDim.x + threadIdx.x; size_t total = dim_x * dim_y * dim_z; size_t total_xy = dim_x * dim_y; size_t id_x = index % dim_x; size_t id_y = (index / dim_x) % dim_x; size_t id_z = index / (dim_x * dim_y); if(index < total){ if(do_x && id_x < (dim_x / 2)){ exchange(coords[2 * total + id_z * total_xy + id_y * dim_x + id_x], coords[2 * total + id_z * total_xy + id_y * dim_x + dim_x-1 - id_x]); __syncthreads(); } if(do_y && id_y < (dim_y / 2)){ exchange(coords[total + id_z * total_xy + id_y * dim_x + id_x], coords[total + id_z * total_xy + (dim_y-1 - id_y) * dim_x + id_x]); __syncthreads(); } if(do_z && id_z < (dim_z / 2)){ exchange(coords[id_z * total_xy + id_y * dim_x + id_x], coords[(dim_z-1 -id_z) * total_xy + id_y * dim_x + id_x]); __syncthreads(); } } }
f64806f67276623db3d508c88ca1036e247d15fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/cosine_embedding_criterion_op.h" namespace caffe2 { namespace { __global__ void CECKernel( const int N, const float* S, const int* Y, const float margin, float* output) { CUDA_1D_KERNEL_LOOP(i, N) { output[i] = Y[i] == 1 ? (1. - S[i]) : fmaxf(0.f, S[i] - margin); } } __global__ void CECGradientKernel( const int N, const float* S, const int* Y, const float* dOutput, const float margin, float* dS) { CUDA_1D_KERNEL_LOOP(i, N) { dS[i] = dOutput[i] * (Y[i] == 1 ? -1 : static_cast<float>(S[i] >= margin)); } } } // namespace template <> bool CosineEmbeddingCriterionOp<CUDAContext>::RunOnDevice() { auto& S = Input(0); auto& Y = Input(1); auto* output = Output(0); CAFFE_ENFORCE(S.size() == Y.size(), "The embedding and label should have the same size."); output->ResizeLike(S); const float* Sdata = S.data<float>(); const int* Ydata = Y.data<int>(); float* output_data = output->mutable_data<float>(); hipLaunchKernelGGL(( CECKernel), dim3(CAFFE_GET_BLOCKS(S.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), S.size(), Sdata, Ydata, margin_, output_data); return true; } template <> bool CosineEmbeddingCriterionGradientOp<CUDAContext>::RunOnDevice() { auto& S = Input(0); auto& Y = Input(1); auto& dOutput = Input(2); auto* dS = Output(0); dS->ResizeLike(S); const float* Sdata = S.data<float>(); const int* Ydata = Y.data<int>(); const float* dOutput_data = dOutput.data<float>(); float* dSdata = dS->mutable_data<float>(); hipLaunchKernelGGL(( CECGradientKernel), dim3(CAFFE_GET_BLOCKS(S.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), S.size(), Sdata, Ydata, dOutput_data, margin_, dSdata); return true; } REGISTER_CUDA_OPERATOR( CosineEmbeddingCriterion, CosineEmbeddingCriterionOp<CUDAContext>); REGISTER_CUDA_OPERATOR( CosineEmbeddingCriterionGradient, CosineEmbeddingCriterionGradientOp<CUDAContext>); } 
// namespace caffe2
f64806f67276623db3d508c88ca1036e247d15fb.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/cosine_embedding_criterion_op.h" namespace caffe2 { namespace { __global__ void CECKernel( const int N, const float* S, const int* Y, const float margin, float* output) { CUDA_1D_KERNEL_LOOP(i, N) { output[i] = Y[i] == 1 ? (1. - S[i]) : fmaxf(0.f, S[i] - margin); } } __global__ void CECGradientKernel( const int N, const float* S, const int* Y, const float* dOutput, const float margin, float* dS) { CUDA_1D_KERNEL_LOOP(i, N) { dS[i] = dOutput[i] * (Y[i] == 1 ? -1 : static_cast<float>(S[i] >= margin)); } } } // namespace template <> bool CosineEmbeddingCriterionOp<CUDAContext>::RunOnDevice() { auto& S = Input(0); auto& Y = Input(1); auto* output = Output(0); CAFFE_ENFORCE(S.size() == Y.size(), "The embedding and label should have the same size."); output->ResizeLike(S); const float* Sdata = S.data<float>(); const int* Ydata = Y.data<int>(); float* output_data = output->mutable_data<float>(); CECKernel<<<CAFFE_GET_BLOCKS(S.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( S.size(), Sdata, Ydata, margin_, output_data); return true; } template <> bool CosineEmbeddingCriterionGradientOp<CUDAContext>::RunOnDevice() { auto& S = Input(0); auto& Y = Input(1); auto& dOutput = Input(2); auto* dS = Output(0); dS->ResizeLike(S); const float* Sdata = S.data<float>(); const int* Ydata = Y.data<int>(); const float* dOutput_data = dOutput.data<float>(); float* dSdata = dS->mutable_data<float>(); CECGradientKernel<<<CAFFE_GET_BLOCKS(S.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( S.size(), Sdata, Ydata, dOutput_data, margin_, dSdata); return true; } REGISTER_CUDA_OPERATOR( CosineEmbeddingCriterion, CosineEmbeddingCriterionOp<CUDAContext>); REGISTER_CUDA_OPERATOR( CosineEmbeddingCriterionGradient, CosineEmbeddingCriterionGradientOp<CUDAContext>); } // namespace caffe2
0225bc3756fdb57d829c656695619e4a492abd7d.hip
// !!! This is a file automatically generated by hipify!!! #include "functions.h" StopWatchInterface *timer = NULL; int initCUDA() { int result = 0; result = findCudaDevice(0, NULL); return result; } void cleanCUDA() { hipDeviceReset(); } ///use to confirm myTimer is good for CUDA timing bool testTimer1(cv::Mat &image) { sdkCreateTimer(&timer); sdkStartTimer(&timer); if (image.type() != CV_8UC3) { return false; } cv::Mat dst_image(image.rows, image.cols, CV_8UC3); sdkStopTimer(&timer); //sdkResetTimer(&timer); printf("allocate host dst memory time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); uchar *devPtr; size_t size = image.cols*image.rows*sizeof(char)* 3; checkCudaErrors(hipMalloc((void**) &devPtr, size)); sdkStopTimer(&timer); printf("allocate device dst memory time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(hipMemcpy(devPtr, image.data, size, hipMemcpyHostToDevice)); sdkStopTimer(&timer); printf("hipMemcpyHostToDevice time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(hipMemcpy(dst_image.data,devPtr, size, hipMemcpyDeviceToHost)); sdkStopTimer(&timer); printf("hipMemcpyDeviceToHost time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(hipDeviceSynchronize()); sdkStopTimer(&timer); printf("hipDeviceSynchronize time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(hipFree(devPtr)); sdkStopTimer(&timer); printf("hipFree time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(hipGetLastError()); sdkStopTimer(&timer); printf("hipGetLastError time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); cv::namedWindow("dst_image_Display window", cv::WINDOW_AUTOSIZE);// Create a window for display. cv::imshow("dst_image_Display window", dst_image); // Show our image inside it. 
cv::waitKey(0); // Wait for a keystroke in the window return true; } bool testTimer2(cv::Mat &image) { if (image.type() != CV_8UC3) { return false; } cv::Mat dst_image; TIMED("malloc host dst memory") { dst_image.create(image.rows, image.cols, CV_8UC3); } uchar *devPtr; size_t size = image.cols*image.rows*sizeof(char)* 3; TIMED("malloc device dst memory") { checkCudaErrors(hipMalloc((void**)&devPtr, size)); } TIMED("hipMemcpyHostToDevice") { checkCudaErrors(hipMemcpy(devPtr, image.data, size, hipMemcpyHostToDevice)); } TIMED("hipMemcpyDeviceToHost") { checkCudaErrors(hipMemcpy(dst_image.data, devPtr, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); } TIMED("hipDeviceSynchronize") { } TIMED("cudaCudaFree") { checkCudaErrors(hipFree(devPtr)); } TIMED("hipGetLastError") { checkCudaErrors(hipGetLastError()); } cv::namedWindow("dst_image_Display window", cv::WINDOW_AUTOSIZE);// Create a window for display. cv::imshow("dst_image_Display window", dst_image); // Show our image inside it. cv::waitKey(0); // Wait for a keystroke in the window return true; }
0225bc3756fdb57d829c656695619e4a492abd7d.cu
#include "functions.h" StopWatchInterface *timer = NULL; int initCUDA() { int result = 0; result = findCudaDevice(0, NULL); return result; } void cleanCUDA() { cudaDeviceReset(); } ///use to confirm myTimer is good for CUDA timing bool testTimer1(cv::Mat &image) { sdkCreateTimer(&timer); sdkStartTimer(&timer); if (image.type() != CV_8UC3) { return false; } cv::Mat dst_image(image.rows, image.cols, CV_8UC3); sdkStopTimer(&timer); //sdkResetTimer(&timer); printf("allocate host dst memory time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); uchar *devPtr; size_t size = image.cols*image.rows*sizeof(char)* 3; checkCudaErrors(cudaMalloc((void**) &devPtr, size)); sdkStopTimer(&timer); printf("allocate device dst memory time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(cudaMemcpy(devPtr, image.data, size, cudaMemcpyHostToDevice)); sdkStopTimer(&timer); printf("cudaMemcpyHostToDevice time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(cudaMemcpy(dst_image.data,devPtr, size, cudaMemcpyDeviceToHost)); sdkStopTimer(&timer); printf("cudaMemcpyDeviceToHost time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(cudaDeviceSynchronize()); sdkStopTimer(&timer); printf("cudaDeviceSynchronize time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(cudaFree(devPtr)); sdkStopTimer(&timer); printf("cudaFree time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkStartTimer(&timer); checkCudaErrors(cudaGetLastError()); sdkStopTimer(&timer); printf("cudaGetLastError time elpse %3.1f ms\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); cv::namedWindow("dst_image_Display window", cv::WINDOW_AUTOSIZE);// Create a window for display. cv::imshow("dst_image_Display window", dst_image); // Show our image inside it. 
cv::waitKey(0); // Wait for a keystroke in the window return true; } bool testTimer2(cv::Mat &image) { if (image.type() != CV_8UC3) { return false; } cv::Mat dst_image; TIMED("malloc host dst memory") { dst_image.create(image.rows, image.cols, CV_8UC3); } uchar *devPtr; size_t size = image.cols*image.rows*sizeof(char)* 3; TIMED("malloc device dst memory") { checkCudaErrors(cudaMalloc((void**)&devPtr, size)); } TIMED("cudaMemcpyHostToDevice") { checkCudaErrors(cudaMemcpy(devPtr, image.data, size, cudaMemcpyHostToDevice)); } TIMED("cudaMemcpyDeviceToHost") { checkCudaErrors(cudaMemcpy(dst_image.data, devPtr, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); } TIMED("cudaDeviceSynchronize") { } TIMED("cudaCudaFree") { checkCudaErrors(cudaFree(devPtr)); } TIMED("cudaGetLastError") { checkCudaErrors(cudaGetLastError()); } cv::namedWindow("dst_image_Display window", cv::WINDOW_AUTOSIZE);// Create a window for display. cv::imshow("dst_image_Display window", dst_image); // Show our image inside it. cv::waitKey(0); // Wait for a keystroke in the window return true; }
6f9c408a6d5557e69b8e0f457009d7c8198fcbfc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <opencv2/opencv.hpp> #include "convolution.h" #include "helpers.h" using namespace std; using namespace cv; void testConvolution() { cv::Mat img = getRawImage("./Lena.pgm"); img.convertTo(img, CV_32FC1); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 2, 2, "global_only", true, "results/kernel2x2_size2x2_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 2, "global_only", true, "results/kernel2x2_size3x3_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 2, "global_only", true, "results/kernel2x2_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 2, "global_only", true, "results/kernel2x2_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 2, "global_only", true, "results/kernel2x2_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 2, "global_only", true, "results/kernel2x2_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 2, "global_only", true, "results/kernel2x2_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 3, "global_only", true, "results/kernel3x3_size3x3_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 3, "global_only", true, "results/kernel3x3_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 3, "global_only", true, "results/kernel3x3_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 3, "global_only", true, "results/kernel3x3_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 3, "global_only", true, "results/kernel3x3_size7x7_global_only.png"); 
convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 3, "global_only", true, "results/kernel3x3_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 3, "global_only", true, "results/kernel3x3_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 4, "global_only", true, "results/kernel4x4_size4x4_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 4, "global_only", true, "results/kernel4x4_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 4, "global_only", true, "results/kernel4x4_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 4, "global_only", true, "results/kernel4x4_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 4, "global_only", true, "results/kernel4x4_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 4, "global_only", true, "results/kernel4x4_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 4, "global_only", true, "results/kernel4x4_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 5, "global_only", true, "results/kernel5x5_size5x5_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 5, "global_only", true, "results/kernel5x5_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 5, "global_only", true, "results/kernel5x5_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 5, "global_only", true, "results/kernel5x5_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 5, "global_only", true, "results/kernel5x5_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 5, "global_only", true, 
"results/kernel5x5_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 5, "global_only", true, "results/kernel5x5_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 6, "global_only", true, "results/kernel6x6_size6x6_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 6, "global_only", true, "results/kernel6x6_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 6, "global_only", true, "results/kernel6x6_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 6, "global_only", true, "results/kernel6x6_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 6, "global_only", true, "results/kernel6x6_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 6, "global_only", true, "results/kernel6x6_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 12, 6, "global_only", true, "results/kernel6x6_size12x12_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 7, "global_only", true, "results/kernel7x7_size7x7_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 8, 7, "global_only", true, "results/kernel7x7_size8x8_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 9, 7, "global_only", true, "results/kernel7x7_size9x9_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 10, 7, "global_only", true, "results/kernel7x7_size10x10_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 11, 7, "global_only", true, "results/kernel7x7_size11x11_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 12, 7, "global_only", true, "results/kernel7x7_size12x12_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, 
img.rows, 13, 7, "global_only", true, "results/kernel7x7_size13x13_global_only.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 2, 2, "global_register", true, "results/kernel2x2_size2x2_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 2, "global_register", true, "results/kernel2x2_size3x3_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 2, "global_register", true, "results/kernel2x2_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 2, "global_register", true, "results/kernel2x2_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 2, "global_register", true, "results/kernel2x2_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 2, "global_register", true, "results/kernel2x2_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 3, 3, "global_register", true, "results/kernel3x3_size3x3_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 3, "global_register", true, "results/kernel3x3_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 3, "global_register", true, "results/kernel3x3_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 3, "global_register", true, "results/kernel3x3_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 3, "global_register", true, "results/kernel3x3_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 4, 4, "global_register", true, "results/kernel4x4_size4x4_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 4, "global_register", true, "results/kernel4x4_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 4, 
"global_register", true, "results/kernel4x4_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 4, "global_register", true, "results/kernel4x4_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 5, 5, "global_register", true, "results/kernel5x5_size5x5_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 5, "global_register", true, "results/kernel5x5_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 5, "global_register", true, "results/kernel5x5_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 6, 6, "global_register", true, "results/kernel6x6_size6x6_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 6, "global_register", true, "results/kernel6x6_size7x7_global_register.png"); convolutionWrapper((float*)&img.data[0], img.cols, img.rows, 7, 7, "global_register", true, "results/kernel7x7_size7x7_global_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 2, 2, "texCache_only", true, "results/kernel2x2_size2x2_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 3, "texCache_only", true, "results/kernel3x3_size3x3_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 4, "texCache_only", true, "results/kernel4x4_size4x4_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 5, "texCache_only", true, "results/kernel5x5_size5x5_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 6, "texCache_only", true, "results/kernel6x6_size6x6_texCache_only.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 7, "texCache_only", true, "results/kernel7x7_size7x7_texCache_only.png"); 
convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 2, 2, "texCache_register", true, "results/kernel2x2_size2x2_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 2, "texCache_register", true, "results/kernel2x2_size3x3_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 2, "texCache_register", true, "results/kernel2x2_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 2, "texCache_register", true, "results/kernel2x2_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 2, "texCache_register", true, "results/kernel2x2_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 2, "texCache_register", true, "results/kernel2x2_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 3, 3, "texCache_register", true, "results/kernel3x3_size3x3_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 3, "texCache_register", true, "results/kernel3x3_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 3, "texCache_register", true, "results/kernel3x3_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 3, "texCache_register", true, "results/kernel3x3_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 3, "texCache_register", true, "results/kernel3x3_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 4, 4, "texCache_register", true, "results/kernel4x4_size4x4_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 4, "texCache_register", true, 
"results/kernel4x4_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 4, "texCache_register", true, "results/kernel4x4_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 4, "texCache_register", true, "results/kernel4x4_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 5, 5, "texCache_register", true, "results/kernel5x5_size5x5_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 5, "texCache_register", true, "results/kernel5x5_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 5, "texCache_register", true, "results/kernel5x5_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 6, 6, "texCache_register", true, "results/kernel6x6_size6x6_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 6, "texCache_register", true, "results/kernel6x6_size7x7_texCache_register.png"); convolutionWrapper_texCache((float*)&img.data[0], img.cols, img.rows, 7, 7, "texCache_register", true, "results/kernel7x7_size7x7_texCache_register.png"); } void testConvolution_withDummyImg(int height, int width) { float* img = getDummyImg(height, width); FILE * pFile = fopen("perf.txt", "w"); fprintf(pFile, "kernelSize amountToLoad memoryScheme responseTime\n"); int nRuns = 10; float responseTime = 0; responseTime = convolutionWrapper(img, width, height, 3, 3, "global_register", false); //warmup printf("memoryScheme = %s \n", "global_only"); for(int kernelSize=2; kernelSize<8; kernelSize++) { for(int sqrtConvsPerThread=1; sqrtConvsPerThread<8; sqrtConvsPerThread++) { int amountToLoad = sqrtConvsPerThread+kernelSize-1; //actually, prefetching nothing in this version responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = 
convolutionWrapper(img, width, height, amountToLoad, kernelSize, "global_only", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "global_only", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f seconds \n", kernelSize, amountToLoad, responseTime); hipDeviceSynchronize(); } printf("\n"); } printf("memoryScheme = %s \n", "global_register"); for(int kernelSize=2; kernelSize<8; kernelSize++) { for(int amountToLoad=kernelSize; amountToLoad<8; amountToLoad++) { responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper(img, width, height, amountToLoad, kernelSize, "global_register", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "global_register", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f seconds \n", kernelSize, amountToLoad, responseTime); hipDeviceSynchronize(); } printf("\n"); } printf("memoryScheme = %s \n", "texCache_only"); for(int kernelSize=2; kernelSize<8; kernelSize++) { int amountToLoad = kernelSize; responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper_texCache(img, width, height, amountToLoad, kernelSize, "texCache_only", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "texCache_only", responseTime); printf("kernelSize = %d. amountToLoad = %d. 
time per Convolution = %f seconds \n", kernelSize, amountToLoad, responseTime); hipDeviceSynchronize(); printf("\n"); } printf("memoryScheme = %s \n", "texCache_register"); for(int kernelSize=2; kernelSize<8; kernelSize++) { for(int amountToLoad=kernelSize; amountToLoad<8; amountToLoad++) { responseTime = 0; for(int i=0; i<nRuns; i++) { float tmpTime = convolutionWrapper_texCache(img, width, height, amountToLoad, kernelSize, "texCache_register", false); responseTime += tmpTime; } responseTime = responseTime/nRuns; fprintf(pFile, "%d, %d, %s, %f \n", kernelSize, amountToLoad, "texCache_register", responseTime); printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f seconds \n", kernelSize, amountToLoad, responseTime); hipDeviceSynchronize(); } printf("\n"); } fclose(pFile); }
6f9c408a6d5557e69b8e0f457009d7c8198fcbfc.cu
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <opencv2/opencv.hpp>
#include "convolution.h"
#include "helpers.h"
using namespace std;
using namespace cv;

// Exhaustive image-output test: runs every (kernelSize, amountToLoad)
// combination for each memory scheme against Lena.pgm and writes the filtered
// images under results/. The loops reproduce exactly the call sequence and
// file names of the original fully-unrolled call list:
//   global_only        : k = 2..7, amountToLoad = k .. k+6
//   global_register    : k = 2..7, amountToLoad = k .. 7
//   texCache_only      : k = 2..7, amountToLoad = k        (square case only)
//   texCache_register  : k = 2..7, amountToLoad = k .. 7
void testConvolution()
{
    cv::Mat img = getRawImage("./Lena.pgm");
    img.convertTo(img, CV_32FC1);
    float* pixels = (float*)&img.data[0];
    char path[128];

    for (int k = 2; k <= 7; k++) {
        for (int a = k; a <= k + 6; a++) {
            snprintf(path, sizeof(path), "results/kernel%dx%d_size%dx%d_global_only.png", k, k, a, a);
            convolutionWrapper(pixels, img.cols, img.rows, a, k, "global_only", true, path);
        }
    }

    for (int k = 2; k <= 7; k++) {
        for (int a = k; a <= 7; a++) {
            snprintf(path, sizeof(path), "results/kernel%dx%d_size%dx%d_global_register.png", k, k, a, a);
            convolutionWrapper(pixels, img.cols, img.rows, a, k, "global_register", true, path);
        }
    }

    for (int k = 2; k <= 7; k++) {
        snprintf(path, sizeof(path), "results/kernel%dx%d_size%dx%d_texCache_only.png", k, k, k, k);
        convolutionWrapper_texCache(pixels, img.cols, img.rows, k, k, "texCache_only", true, path);
    }

    for (int k = 2; k <= 7; k++) {
        for (int a = k; a <= 7; a++) {
            snprintf(path, sizeof(path), "results/kernel%dx%d_size%dx%d_texCache_register.png", k, k, a, a);
            convolutionWrapper_texCache(pixels, img.cols, img.rows, a, k, "texCache_register", true, path);
        }
    }
}

// Benchmarks one configuration: averages nRuns timed convolutions, appends a
// CSV-ish row to `out`, echoes it to stdout, and synchronizes the device
// (same per-configuration behavior as the original inline benchmark bodies).
static void benchOneConfig(FILE* out, float* img, int width, int height,
                           int amountToLoad, int kernelSize,
                           const char* scheme, bool useTexCache, int nRuns)
{
    float total = 0;
    for (int run = 0; run < nRuns; run++) {
        total += useTexCache
            ? convolutionWrapper_texCache(img, width, height, amountToLoad, kernelSize, scheme, false)
            : convolutionWrapper(img, width, height, amountToLoad, kernelSize, scheme, false);
    }
    float avg = total / nRuns;
    fprintf(out, "%d, %d, %s, %f \n", kernelSize, amountToLoad, scheme, avg);
    printf("kernelSize = %d. amountToLoad = %d. time per Convolution = %f seconds \n", kernelSize, amountToLoad, avg);
    cudaDeviceSynchronize();
}

// Timing sweep on a synthetic image of the given dimensions; results are
// written to perf.txt. Sweep ranges per scheme mirror testConvolution()
// except that global_only extends amountToLoad to k+6 via sqrtConvsPerThread.
void testConvolution_withDummyImg(int height, int width)
{
    float* img = getDummyImg(height, width);
    FILE* pFile = fopen("perf.txt", "w");
    fprintf(pFile, "kernelSize amountToLoad memoryScheme responseTime\n");
    const int nRuns = 10;

    convolutionWrapper(img, width, height, 3, 3, "global_register", false); //warmup

    printf("memoryScheme = %s \n", "global_only");
    for (int k = 2; k < 8; k++) {
        // amountToLoad = sqrtConvsPerThread + k - 1 for sqrtConvsPerThread in [1, 8)
        for (int s = 1; s < 8; s++)
            benchOneConfig(pFile, img, width, height, s + k - 1, k, "global_only", false, nRuns);
        printf("\n");
    }

    printf("memoryScheme = %s \n", "global_register");
    for (int k = 2; k < 8; k++) {
        for (int a = k; a < 8; a++)
            benchOneConfig(pFile, img, width, height, a, k, "global_register", false, nRuns);
        printf("\n");
    }

    printf("memoryScheme = %s \n", "texCache_only");
    for (int k = 2; k < 8; k++) {
        benchOneConfig(pFile, img, width, height, k, k, "texCache_only", true, nRuns);
        printf("\n");
    }

    printf("memoryScheme = %s \n", "texCache_register");
    for (int k = 2; k < 8; k++) {
        for (int a = k; a < 8; a++)
            benchOneConfig(pFile, img, width, height, a, k, "texCache_register", true, nRuns);
        printf("\n");
    }

    fclose(pFile);
}
cc079a10e57feb95ffcc7e092000703a989ebc4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void unique_idx_calc_threadIdx(int * input) { int tid = threadIdx.x; printf("threadIdx: %d, value: %d \n", tid, input[tid]); } int main() { int array_size = 8; int array_byte_size = sizeof(int) * array_size; int h_data[] = { 23, 9, 4, 53, 65, 12, 1, 33 }; for (size_t i = 0; i < array_size; i++) { printf("%d ", h_data[i]); } printf("\n \n"); int * d_data; hipMalloc((void**) &d_data, array_byte_size); hipMemcpy(d_data, h_data, array_byte_size, hipMemcpyHostToDevice); dim3 block(4); dim3 grid(1); unique_idx_calc_threadIdx << < grid, block >> > (d_data); hipDeviceSynchronize(); hipDeviceReset(); return 0; }
cc079a10e57feb95ffcc7e092000703a989ebc4c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void unique_idx_calc_threadIdx(int * input) { int tid = threadIdx.x; printf("threadIdx: %d, value: %d \n", tid, input[tid]); } int main() { int array_size = 8; int array_byte_size = sizeof(int) * array_size; int h_data[] = { 23, 9, 4, 53, 65, 12, 1, 33 }; for (size_t i = 0; i < array_size; i++) { printf("%d ", h_data[i]); } printf("\n \n"); int * d_data; cudaMalloc((void**) &d_data, array_byte_size); cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice); dim3 block(4); dim3 grid(1); unique_idx_calc_threadIdx << < grid, block >> > (d_data); cudaDeviceSynchronize(); cudaDeviceReset(); return 0; }
a14a2038cbcb084343d4070d0660039afafce3fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //------------------------------------------------------------------------------------------------------------------------------------ // To compile use this following command // nvcc julia_set.cu -lglfw -Xlinker -framework,Cocoa,-framework,OpenGL,-framework,IOKit,-framework,CoreVideo // // Reference: Title: CUDA by Example: An Introduction to General-Purpose GPU Programming 1st Author: Jason Sanders, Edward Kandrot Pages: 312 Publisher: Addison-Wesley Professional 2010 ISBN: 0131387685 9780131387683 //----------------------------------------------------------------------------------------------------------------------------------- #include <GLFW/glfw3.h> #include <stdio.h> #include <stdlib.h> #define DIM 2000 struct hipComplex { float r; float i; __device__ hipComplex(float a, float b) : r(a), i(b) { } __device__ float magnitude2(void) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r * a.r - i * a.i, i * a.r + r * a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r + a.r, i + a.i); } }; __device__ int julia(int x, int y) { const float scale = 1.5; int dim_2 = DIM / 2; float jx = scale * (float) (dim_2 - x) / dim_2; float jy = scale * (float) (dim_2 - y) / dim_2; hipComplex c(-0.8, 0.156); hipComplex a(jx, jy); for(int i = 0; i < 200; i++) { a = a * a + c; if (a.magnitude2() > 1000) { return 0; } } return 1; } __global__ void kernel(unsigned char *ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; int juliavalue = julia(x, y); ptr[offset * 4 + 0] = 127 * juliavalue; ptr[offset * 4 + 1] = 255 * juliavalue; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } struct CPUBitmap { unsigned char *pixels; int x, y; void *dataBlock; CPUBitmap( int width, int height, void *d = NULL ) { pixels = new unsigned char[width * height * 4]; x = width; y = height; dataBlock = d; } 
~CPUBitmap() { delete [] pixels; } unsigned char* get_ptr( void ) const { return pixels; } long image_size( void ) const { return x * y * 4; } void display_and_exit() { CPUBitmap** bitmap = get_bitmap_ptr(); *bitmap = this; GLFWwindow* window; if(!glfwInit()) { printf("Failed on GLFW init\n"); exit(EXIT_FAILURE); } window = glfwCreateWindow(x, y, "Julia Set", NULL, NULL); if(!window) { glfwTerminate(); printf("Failed to create window\n"); exit(EXIT_FAILURE); } glfwMakeContextCurrent(window); glfwSetKeyCallback(window, key_callback); while(!glfwWindowShouldClose(window)) { CPUBitmap* bitmap = *(get_bitmap_ptr()); glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); glDrawPixels(bitmap->x, bitmap->y, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels); glfwSwapBuffers(window); glfwPollEvents(); } glfwTerminate(); } // static method used for glut callbacks static CPUBitmap** get_bitmap_ptr( void ) { static CPUBitmap *gBitmap; return &gBitmap; } static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods) { if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS){ glfwSetWindowShouldClose(window, GL_TRUE); glfwTerminate(); exit(EXIT_SUCCESS); } } }; void handleError(hipError_t error, int lineNo) { if(error != hipSuccess) { printf("Error: %s\n in file %s at line no %d\n", hipGetErrorString(error), __FILE__, __LINE__); exit(EXIT_FAILURE); } } int main(int argc, char *argv[]) { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; handleError(hipMalloc((void **) &dev_bitmap, bitmap.image_size()), __LINE__); dim3 grid(DIM, DIM); hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap); handleError(hipMemcpy(bitmap.get_ptr(), dev_bitmap,bitmap.image_size(), hipMemcpyDeviceToHost), __LINE__); bitmap.display_and_exit(); handleError(hipFree(dev_bitmap), __LINE__); return 0; }
a14a2038cbcb084343d4070d0660039afafce3fb.cu
//------------------------------------------------------------------------------------------------------------------------------------
// To compile use this following command
// nvcc julia_set.cu -lglfw -Xlinker -framework,Cocoa,-framework,OpenGL,-framework,IOKit,-framework,CoreVideo
//
// Reference: Title: CUDA by Example: An Introduction to General-Purpose GPU Programming 1st Author: Jason Sanders, Edward Kandrot Pages: 312 Publisher: Addison-Wesley Professional ©2010 ISBN: 0131387685 9780131387683
//-----------------------------------------------------------------------------------------------------------------------------------
#include <GLFW/glfw3.h>
#include <stdio.h>
#include <stdlib.h>

#define DIM 2000

// Minimal complex-number type usable from device code.
struct cuComplex {
    float r;
    float i;
    __device__ cuComplex(float a, float b) : r(a), i(b) { }
    __device__ float magnitude2(void) { return r * r + i * i; }
    __device__ cuComplex operator*(const cuComplex& a) {
        return cuComplex(r * a.r - i * a.i, i * a.r + r * a.i);
    }
    __device__ cuComplex operator+(const cuComplex& a) {
        return cuComplex(r + a.r, i + a.i);
    }
};

// Returns 1 when pixel (x, y) belongs to the Julia set (iteration stays
// bounded for 200 steps), 0 otherwise.
__device__ int julia(int x, int y)
{
    const float scale = 1.5;
    int dim_2 = DIM / 2;
    float jx = scale * (float) (dim_2 - x) / dim_2;
    float jy = scale * (float) (dim_2 - y) / dim_2;

    cuComplex c(-0.8, 0.156);
    cuComplex a(jx, jy);

    for(int i = 0; i < 200; i++) {
        a = a * a + c;
        if (a.magnitude2() > 1000) {
            return 0;
        }
    }
    return 1;
}

// One block per pixel (grid is DIM x DIM, 1 thread per block); writes RGBA.
__global__ void kernel(unsigned char *ptr)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;

    int juliavalue = julia(x, y);
    ptr[offset * 4 + 0] = 127 * juliavalue;
    ptr[offset * 4 + 1] = 255 * juliavalue;
    ptr[offset * 4 + 2] = 0;
    ptr[offset * 4 + 3] = 255;
}

// Host-side RGBA bitmap plus a GLFW display loop.
struct CPUBitmap {
    unsigned char *pixels;
    int x, y;
    void *dataBlock;

    CPUBitmap( int width, int height, void *d = NULL ) {
        pixels = new unsigned char[width * height * 4];
        x = width;
        y = height;
        dataBlock = d;
    }

    ~CPUBitmap() { delete [] pixels; }

    unsigned char* get_ptr( void ) const { return pixels; }
    long image_size( void ) const { return x * y * 4; }

    // Opens a window and redraws the bitmap until the window is closed or
    // ESC is pressed (key_callback exits the process).
    void display_and_exit() {
        CPUBitmap** bitmap = get_bitmap_ptr();
        *bitmap = this;
        GLFWwindow* window;
        if(!glfwInit()) {
            printf("Failed on GLFW init\n");
            exit(EXIT_FAILURE);
        }
        window = glfwCreateWindow(x, y, "Julia Set", NULL, NULL);
        if(!window) {
            glfwTerminate();
            printf("Failed to create window\n");
            exit(EXIT_FAILURE);
        }
        glfwMakeContextCurrent(window);
        glfwSetKeyCallback(window, key_callback);
        while(!glfwWindowShouldClose(window)) {
            CPUBitmap* bitmap = *(get_bitmap_ptr());
            glClearColor( 0.0, 0.0, 0.0, 1.0 );
            glClear( GL_COLOR_BUFFER_BIT );
            glDrawPixels(bitmap->x, bitmap->y, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);
            glfwSwapBuffers(window);
            glfwPollEvents();
        }
        glfwTerminate();
    }

    // static method used for glut callbacks
    static CPUBitmap** get_bitmap_ptr( void ) {
        static CPUBitmap *gBitmap;
        return &gBitmap;
    }

    static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods) {
        if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS){
            glfwSetWindowShouldClose(window, GL_TRUE);
            glfwTerminate();
            exit(EXIT_SUCCESS);
        }
    }
};

// Aborts with a message if `error` is not cudaSuccess. `lineNo` is the caller's
// __LINE__; previously this printed __LINE__ directly, which always reported a
// line inside handleError itself rather than the failing call site.
void handleError(cudaError_t error, int lineNo)
{
    if(error != cudaSuccess) {
        printf("Error: %s\n in file %s at line no %d\n", cudaGetErrorString(error), __FILE__, lineNo);
        exit(EXIT_FAILURE);
    }
}

int main(int argc, char *argv[])
{
    CPUBitmap bitmap(DIM, DIM);
    unsigned char *dev_bitmap;

    handleError(cudaMalloc((void **) &dev_bitmap, bitmap.image_size()), __LINE__);

    dim3 grid(DIM, DIM);
    kernel<<<grid,1>>>(dev_bitmap);

    handleError(cudaMemcpy(bitmap.get_ptr(), dev_bitmap,bitmap.image_size(), cudaMemcpyDeviceToHost), __LINE__);

    bitmap.display_and_exit();
    handleError(cudaFree(dev_bitmap), __LINE__);
    return 0;
}
14dbf5b3f7fb5ebaf2ec6c853b9615fec89981c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <torch/serialize/tensor.h> #include <ATen/ATen.h> #include "ATen/NativeFunctions.h" #include <ATen/hip/HIPContext.h> template <typename scalar> __device__ __forceinline__ scalar fmin(scalar a, scalar b) { return a > b ? b : a; } template <typename scalar> __device__ __forceinline__ scalar fmax(scalar a, scalar b) { return a > b ? a : b; } template <typename scalar> __device__ __forceinline__ scalar IoU(const scalar* box_x, const scalar* box_y) { // Calculate IoU between the boxes. scalar rightmost_l = fmax(box_x[0], box_y[0]); scalar leftmost_r = fmin(box_x[0] + box_x[2], box_y[0] + box_y[2]); scalar delta_x = fmax((scalar)0., leftmost_r - rightmost_l); scalar bottommost_tp = fmax(box_x[1], box_y[1]); scalar topmost_b = fmin(box_x[1] + box_x[3], box_y[1] + box_y[3]); scalar delta_y = fmax((scalar)0., topmost_b - bottommost_tp); scalar uni = box_x[2] * box_x[3] + box_y[2] * box_y[3]; return delta_x * delta_y / (uni - delta_x * delta_y); } template <typename scalar> __global__ void nms_kernel(unsigned char* mask, const scalar* boxes, const int64_t* inds, const int64_t num_boxes, double thresh) { //A pretty straightforward implementation, analogous to the standard serial //version but with the IoUs computed and mask updated in parallel. We access //the box data through an array of sorted indices rather than physically //sorting it: unless one has an inordinate number of boxes (O(10^5), whereas //for example in the faster rcnn paper they feed 6000 per batch) the //data will fit in L2 so sorting it won't actually reduce the number of //messy reads from global. int col = 0; while(col < num_boxes-1) { for(int i = threadIdx.x; i < num_boxes-1; i+=blockDim.x) if(i >= col) { scalar iou = IoU(&boxes[4*inds[i+1+num_boxes*blockIdx.x] + 4*num_boxes*blockIdx.x], &boxes[4*inds[col+num_boxes*blockIdx.x] + 4*num_boxes*blockIdx.x]); mask[i+1+blockIdx.x*num_boxes] *= (iou>thresh) ? 
0 : 1; } __syncthreads(); ++col; while((col < num_boxes - 1) && (mask[col+blockIdx.x*num_boxes]==0)) ++col; } } std::vector<at::Tensor> Non_Max_Suppression_CUDA( const at::Tensor& input, const at::Tensor& scores, double thresh) { AT_ASSERT(input.ndimension() == 3); AT_ASSERT(scores.ndimension() == 2); AT_ASSERT(input.size(0) == scores.size(0)); AT_ASSERT(input.size(1) == scores.size(1)); AT_ASSERT(input.size(2) == 4); AT_ASSERT(input.is_contiguous()); AT_ASSERT(scores.is_contiguous()); AT_ASSERT(input.type().scalarType() == at::kFloat || input.type().scalarType() == at::kDouble) AT_ASSERT(scores.type().scalarType() == at::kFloat || scores.type().scalarType() == at::kDouble) auto num_boxes = input.size(1); auto batch_size = input.size(0); //auto mask = input.type().toScalarType(at::kByte).tensor({batch_size, num_boxes}); auto mask = torch::zeros({batch_size, num_boxes}, input.type().toScalarType(at::kByte)); mask.fill_(1); //need the indices of the boxes sorted by score. at::Tensor sorted_inds = std::get<1>(scores.sort(-1, true)); dim3 mask_block(512); //would be nice to have 1024 here for gpus that support it, //but not sure how to do this cleanly without calling //hipGetDeviceProperties in the funcion body... dim3 mask_grid(batch_size); if(input.type().scalarType() == at::kFloat) { hipLaunchKernelGGL(( nms_kernel), dim3(mask_grid), dim3(mask_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), mask.data<unsigned char>(), input.data<float>(), sorted_inds.data<int64_t>(), num_boxes, thresh); AT_ASSERT(hipGetLastError() == hipSuccess); } else { hipLaunchKernelGGL(( nms_kernel), dim3(mask_grid), dim3(mask_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), mask.data<unsigned char>(), input.data<double>(), sorted_inds.data<int64_t>(), num_boxes, thresh); AT_ASSERT(hipGetLastError() == hipSuccess); } //It's not entirely clear what the best thing to return is here. 
The algorithm will //produce a different number of boxes for each batch, so there is no obvious way of //way of returning the surving boxes/indices as a tensor. Returning a mask on the //sorted boxes together with the sorted indices seems reasonable; that way, the user //can easily take the N highest-scoring surviving boxes to form a tensor if they wish. return {mask, sorted_inds}; }
14dbf5b3f7fb5ebaf2ec6c853b9615fec89981c4.cu
#include <torch/serialize/tensor.h> #include <ATen/ATen.h> #include "ATen/NativeFunctions.h" #include <ATen/cuda/CUDAContext.h> template <typename scalar> __device__ __forceinline__ scalar fmin(scalar a, scalar b) { return a > b ? b : a; } template <typename scalar> __device__ __forceinline__ scalar fmax(scalar a, scalar b) { return a > b ? a : b; } template <typename scalar> __device__ __forceinline__ scalar IoU(const scalar* box_x, const scalar* box_y) { // Calculate IoU between the boxes. scalar rightmost_l = fmax(box_x[0], box_y[0]); scalar leftmost_r = fmin(box_x[0] + box_x[2], box_y[0] + box_y[2]); scalar delta_x = fmax((scalar)0., leftmost_r - rightmost_l); scalar bottommost_tp = fmax(box_x[1], box_y[1]); scalar topmost_b = fmin(box_x[1] + box_x[3], box_y[1] + box_y[3]); scalar delta_y = fmax((scalar)0., topmost_b - bottommost_tp); scalar uni = box_x[2] * box_x[3] + box_y[2] * box_y[3]; return delta_x * delta_y / (uni - delta_x * delta_y); } template <typename scalar> __global__ void nms_kernel(unsigned char* mask, const scalar* boxes, const int64_t* inds, const int64_t num_boxes, double thresh) { //A pretty straightforward implementation, analogous to the standard serial //version but with the IoUs computed and mask updated in parallel. We access //the box data through an array of sorted indices rather than physically //sorting it: unless one has an inordinate number of boxes (O(10^5), whereas //for example in the faster rcnn paper they feed 6000 per batch) the //data will fit in L2 so sorting it won't actually reduce the number of //messy reads from global. int col = 0; while(col < num_boxes-1) { for(int i = threadIdx.x; i < num_boxes-1; i+=blockDim.x) if(i >= col) { scalar iou = IoU(&boxes[4*inds[i+1+num_boxes*blockIdx.x] + 4*num_boxes*blockIdx.x], &boxes[4*inds[col+num_boxes*blockIdx.x] + 4*num_boxes*blockIdx.x]); mask[i+1+blockIdx.x*num_boxes] *= (iou>thresh) ? 
0 : 1; } __syncthreads(); ++col; while((col < num_boxes - 1) && (mask[col+blockIdx.x*num_boxes]==0)) ++col; } } std::vector<at::Tensor> Non_Max_Suppression_CUDA( const at::Tensor& input, const at::Tensor& scores, double thresh) { AT_ASSERT(input.ndimension() == 3); AT_ASSERT(scores.ndimension() == 2); AT_ASSERT(input.size(0) == scores.size(0)); AT_ASSERT(input.size(1) == scores.size(1)); AT_ASSERT(input.size(2) == 4); AT_ASSERT(input.is_contiguous()); AT_ASSERT(scores.is_contiguous()); AT_ASSERT(input.type().scalarType() == at::kFloat || input.type().scalarType() == at::kDouble) AT_ASSERT(scores.type().scalarType() == at::kFloat || scores.type().scalarType() == at::kDouble) auto num_boxes = input.size(1); auto batch_size = input.size(0); //auto mask = input.type().toScalarType(at::kByte).tensor({batch_size, num_boxes}); auto mask = torch::zeros({batch_size, num_boxes}, input.type().toScalarType(at::kByte)); mask.fill_(1); //need the indices of the boxes sorted by score. at::Tensor sorted_inds = std::get<1>(scores.sort(-1, true)); dim3 mask_block(512); //would be nice to have 1024 here for gpus that support it, //but not sure how to do this cleanly without calling //cudaGetDeviceProperties in the funcion body... dim3 mask_grid(batch_size); if(input.type().scalarType() == at::kFloat) { nms_kernel<<<mask_grid, mask_block, 0, at::cuda::getCurrentCUDAStream()>>>( mask.data<unsigned char>(), input.data<float>(), sorted_inds.data<int64_t>(), num_boxes, thresh); AT_ASSERT(cudaGetLastError() == cudaSuccess); } else { nms_kernel<<<mask_grid, mask_block, 0, at::cuda::getCurrentCUDAStream()>>>( mask.data<unsigned char>(), input.data<double>(), sorted_inds.data<int64_t>(), num_boxes, thresh); AT_ASSERT(cudaGetLastError() == cudaSuccess); } //It's not entirely clear what the best thing to return is here. The algorithm will //produce a different number of boxes for each batch, so there is no obvious way of //way of returning the surving boxes/indices as a tensor. 
Returning a mask on the //sorted boxes together with the sorted indices seems reasonable; that way, the user //can easily take the N highest-scoring surviving boxes to form a tensor if they wish. return {mask, sorted_inds}; }
7298485427c1b2154a3937234c534feada5ed0f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_tea_leaf_yeqx_kernel [2][1]; static int dims_tea_leaf_yeqx_kernel_h [2][1] = {0}; //user function __device__ void tea_leaf_yeqx_kernel_gpu (ACC<double> & p, const ACC<double> & x) { p(0,0) = x(0,0); } __global__ void ops_tea_leaf_yeqx_kernel( double* __restrict arg0, double* __restrict arg1, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_yeqx_kernel[0][0]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_yeqx_kernel[1][0]; if (idx_x < size0 && idx_y < size1) { ACC<double> argp0(dims_tea_leaf_yeqx_kernel[0][0], arg0); const ACC<double> argp1(dims_tea_leaf_yeqx_kernel[1][0], arg1); tea_leaf_yeqx_kernel_gpu(argp0, argp1); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_yeqx_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_tea_leaf_yeqx_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,2,range,30)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(30,"tea_leaf_yeqx_kernel"); OPS_kernels[30].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[2]; #endif #ifdef OPS_MPI if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int 
xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != dims_tea_leaf_yeqx_kernel_h[0][0] || xdim1 != dims_tea_leaf_yeqx_kernel_h[1][0]) { dims_tea_leaf_yeqx_kernel_h[0][0] = xdim0; dims_tea_leaf_yeqx_kernel_h[1][0] = xdim1; cutilSafeCall(hipMemcpyToSymbol( dims_tea_leaf_yeqx_kernel, dims_tea_leaf_yeqx_kernel_h, sizeof(dims_tea_leaf_yeqx_kernel))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[2]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[30].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) hipLaunchKernelGGL(( ops_tea_leaf_yeqx_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[30].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[30].mpi_time += t2-t1; OPS_kernels[30].transfer += 
ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[30].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_yeqx_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 30; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 30; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg*)malloc(2*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->function = ops_par_loop_tea_leaf_yeqx_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(30,"tea_leaf_yeqx_kernel"); } ops_enqueue_kernel(desc); } #endif
7298485427c1b2154a3937234c534feada5ed0f0.cu
// // auto-generated by ops.py // __constant__ int dims_tea_leaf_yeqx_kernel [2][1]; static int dims_tea_leaf_yeqx_kernel_h [2][1] = {0}; //user function __device__ void tea_leaf_yeqx_kernel_gpu (ACC<double> & p, const ACC<double> & x) { p(0,0) = x(0,0); } __global__ void ops_tea_leaf_yeqx_kernel( double* __restrict arg0, double* __restrict arg1, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_yeqx_kernel[0][0]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_yeqx_kernel[1][0]; if (idx_x < size0 && idx_y < size1) { ACC<double> argp0(dims_tea_leaf_yeqx_kernel[0][0], arg0); const ACC<double> argp1(dims_tea_leaf_yeqx_kernel[1][0], arg1); tea_leaf_yeqx_kernel_gpu(argp0, argp1); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_yeqx_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_tea_leaf_yeqx_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,2,range,30)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(30,"tea_leaf_yeqx_kernel"); OPS_kernels[30].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[2]; #endif #ifdef OPS_MPI if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; if (xdim0 != 
dims_tea_leaf_yeqx_kernel_h[0][0] || xdim1 != dims_tea_leaf_yeqx_kernel_h[1][0]) { dims_tea_leaf_yeqx_kernel_h[0][0] = xdim0; dims_tea_leaf_yeqx_kernel_h[1][0] = xdim1; cutilSafeCall(cudaMemcpyToSymbol( dims_tea_leaf_yeqx_kernel, dims_tea_leaf_yeqx_kernel_h, sizeof(dims_tea_leaf_yeqx_kernel))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[2]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[30].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) ops_tea_leaf_yeqx_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[30].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[30].mpi_time += t2-t1; OPS_kernels[30].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[30].transfer += ops_compute_transfer(dim, start, end, 
&arg1); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_yeqx_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 30; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 30; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg*)malloc(2*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->function = ops_par_loop_tea_leaf_yeqx_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(30,"tea_leaf_yeqx_kernel"); } ops_enqueue_kernel(desc); } #endif
04e1e13bd89a88aaa9e7abc7e7768f69966be8e0.hip
// !!! This is a file automatically generated by hipify!!! #include <assert.h> #include <math.h> #include <float.h> #include <stdexcept> #include <tuple> #include <vector> #include <sys/time.h> #include "src/fastertransformer/kernels/logprob_kernels.h" #include "src/fastertransformer/utils/allocator.h" #include "src/fastertransformer/utils/cuda_utils.h" #include "src/fastertransformer/utils/logger.h" #include "src/fastertransformer/utils/memory_utils.h" #include "tests/unittests/gtest_utils.h" using namespace fastertransformer; //////////////////////////////////////////////////////////////////////////////////// struct LogProbKernelTestParam { size_t max_input_length; size_t batch_size; size_t vocab_size; size_t beam_width; std::string toString() { return fmtstr("LogProbKernelTestParam[max_input_length=%ld, batch=%ld, vocab=%ld, beam_width=%ld]", max_input_length, batch_size, vocab_size, beam_width); } }; /////////////////////////////////// Unittests ////////////////////////////////////////// template<typename T> class LogProbKernelTest : public FtTestBase { protected: void computeCumLogProbs(float* cum_log_probs, float* log_probs, const T* logits, const int* input_ids, const int* input_lengths, const size_t max_input_length, const size_t batch_size, const size_t vocab_size, const size_t vocab_size_padded) { for (size_t step = 0; step < max_input_length; ++step) { for (size_t i = 0; i < batch_size; ++i) { if ((int)step == 0) { if (log_probs != nullptr) { log_probs[i] = 0.0f; } cum_log_probs[i] = 0.0f; } else if ((int)step < input_lengths[i]) { size_t step_offset = (step - 1) * batch_size * vocab_size_padded; const T* vec = logits + step_offset + i * vocab_size_padded; float max_logits = -FLT_MAX; for (size_t v = 0; v < vocab_size; ++v) { float val = static_cast<float>(vec[v]); if (val > max_logits) { max_logits = val; } } float sum = 0.0f; for (size_t v = 0; v < vocab_size; ++v) { sum += expf(static_cast<float>(vec[v]) - max_logits); } int token_id = input_ids[step 
* batch_size + i]; float log_prob = static_cast<float>(vec[token_id]) - max_logits - log(sum); if (log_probs != nullptr) { log_probs[step * batch_size + i] = log_prob; } cum_log_probs[i] += log_prob; } } } } void computeCumLogProbsBatchFirst(float* cum_log_probs, float* log_probs, const T* logits, const int* input_ids, const int* input_lengths, const size_t max_input_length, const size_t batch_size, const size_t vocab_size, const size_t vocab_size_padded) { for (size_t i = 0; i < batch_size; ++i) { size_t batch_offset = i * max_input_length * vocab_size_padded; for (size_t step = 0; step < max_input_length; ++step) { if ((int)step == 0) { if (log_probs != nullptr) { log_probs[i * max_input_length] = 0.0f; } cum_log_probs[i] = 0.0f; } else if ((int)step < input_lengths[i]) { const T* vec = logits + batch_offset + (step - 1) * vocab_size_padded; float max_logits = -FLT_MAX; for (size_t v = 0; v < vocab_size; ++v) { float val = static_cast<float>(vec[v]); if (val > max_logits) { max_logits = val; } } float sum = 0.0f; for (size_t v = 0; v < vocab_size; ++v) { sum += expf(static_cast<float>(vec[v]) - max_logits); } int token_id = input_ids[i * max_input_length + step]; float log_prob = static_cast<float>(vec[token_id]) - max_logits - log(sum); if (log_probs != nullptr) { log_probs[i * max_input_length + step] = log_prob; } cum_log_probs[i] += log_prob; } } } } public: void runTest(LogProbKernelTestParam param) { size_t max_input_length = param.max_input_length; size_t batchxbeam = param.batch_size * param.beam_width; size_t vocab_size = param.vocab_size; // Make multiple of 8 as GPT does. 
size_t vocab_size_padded = static_cast<size_t>(ceil(vocab_size / 8.f) * 8); // input values T* h_logits = new T[max_input_length * batchxbeam * vocab_size]; int* h_input_ids = new int[max_input_length * batchxbeam]; int* h_input_lengths = new int[batchxbeam]; // output buffers float* expected_cum_log_probs = new float[batchxbeam]; // initialize host buffers initRandom(h_logits, max_input_length * batchxbeam * vocab_size, -10.0f / vocab_size, -1.0f); initRandomInt(h_input_ids, max_input_length * batchxbeam, 0, vocab_size); initRandomInt(h_input_lengths, batchxbeam, 1, max_input_length + 1); memset(expected_cum_log_probs, 0, sizeof(float) * batchxbeam); // device buffers T* d_logits = reinterpret_cast<T*>(allocator->malloc(sizeof(T) * max_input_length * batchxbeam * vocab_size)); int *d_input_ids = reinterpret_cast<int*>(allocator->malloc(sizeof(int) * max_input_length * batchxbeam)); int *d_input_lengths = reinterpret_cast<int*>(allocator->malloc(sizeof(int) * batchxbeam)); float* d_cum_log_probs = reinterpret_cast<float*>(allocator->malloc(sizeof(float) * batchxbeam)); // initialize device buffers cudaH2Dcpy(d_logits, h_logits, max_input_length * batchxbeam * vocab_size); cudaH2Dcpy(d_input_ids, h_input_ids, max_input_length * batchxbeam); cudaH2Dcpy(d_input_lengths, h_input_lengths, batchxbeam); deviceFill(d_cum_log_probs, batchxbeam, 0.0f); size_t workspace_size = sizeof(float) * max_input_length * batchxbeam; void* workspace = allocator->malloc(workspace_size); invokeLogProbFromLogits(d_cum_log_probs, d_logits, d_input_ids, d_input_lengths, max_input_length, batchxbeam, vocab_size, vocab_size_padded, workspace, workspace_size, stream, false); computeCumLogProbs(expected_cum_log_probs, nullptr, h_logits, h_input_ids, h_input_lengths, max_input_length, batchxbeam, vocab_size, vocab_size_padded); bool passed = checkResult(param.toString(), d_cum_log_probs, expected_cum_log_probs, batchxbeam); EXPECT_TRUE(passed); FT_LOG_DEBUG("free host buffers"); delete[] 
expected_cum_log_probs; delete[] h_input_lengths; delete[] h_input_ids; delete[] h_logits; } void runBatchFirstTest(LogProbKernelTestParam param) { size_t max_input_length = param.max_input_length; size_t batchxbeam = param.batch_size * param.beam_width; size_t vocab_size = param.vocab_size; // Make multiple of 8 as GPT does. size_t vocab_size_padded = static_cast<size_t>(ceil(vocab_size / 8.f) * 8); // input values T* h_logits = new T[max_input_length * batchxbeam * vocab_size_padded]; int* h_input_ids = new int[max_input_length * batchxbeam]; int* h_input_lengths = new int[batchxbeam]; // output buffers float* expected_cum_log_probs = new float[batchxbeam]; // initialize host buffers initRandom(h_logits, max_input_length * batchxbeam * vocab_size_padded, -10.0f / vocab_size, -1.0f); initRandomInt(h_input_ids, max_input_length * batchxbeam, 0, vocab_size); initRandomInt(h_input_lengths, batchxbeam, 1, max_input_length + 1); memset(expected_cum_log_probs, 0, sizeof(float) * batchxbeam); // device buffers T* d_logits = reinterpret_cast<T*>(allocator->malloc(sizeof(T) * max_input_length * batchxbeam * vocab_size_padded)); int *d_input_ids = reinterpret_cast<int*>(allocator->malloc(sizeof(int) * max_input_length * batchxbeam)); int *d_input_lengths = reinterpret_cast<int*>(allocator->malloc(sizeof(int) * batchxbeam)); float* d_cum_log_probs = reinterpret_cast<float*>(allocator->malloc(sizeof(float) * batchxbeam)); // initialize device buffers cudaH2Dcpy(d_logits, h_logits, max_input_length * batchxbeam * vocab_size_padded); cudaH2Dcpy(d_input_ids, h_input_ids, max_input_length * batchxbeam); cudaH2Dcpy(d_input_lengths, h_input_lengths, batchxbeam); check_cuda_error(hipMemset(d_cum_log_probs, 0, sizeof(float) * batchxbeam)); size_t workspace_size = sizeof(float) * max_input_length * batchxbeam; void* workspace = allocator->malloc(workspace_size); invokeLogProbFromLogits(d_cum_log_probs, d_logits, d_input_ids, d_input_lengths, max_input_length, batchxbeam, vocab_size, 
vocab_size_padded, workspace, workspace_size, stream, true); computeCumLogProbsBatchFirst(expected_cum_log_probs, nullptr, h_logits, h_input_ids, h_input_lengths, max_input_length, batchxbeam, vocab_size, vocab_size_padded); std::string tag = param.toString() + (std::is_same<T, float>::value ? " (fp32)" : " (fp16)"); bool passed = checkResult(tag.c_str(), d_cum_log_probs, expected_cum_log_probs, batchxbeam); EXPECT_TRUE(passed); delete[] expected_cum_log_probs; delete[] h_input_lengths; delete[] h_input_ids; delete[] h_logits; } }; TYPED_TEST_SUITE(LogProbKernelTest, FloatAndHalfTypes); TYPED_TEST(LogProbKernelTest, SingleStep) { this->runTest({1, 32, 16, 1}); } TYPED_TEST(LogProbKernelTest, AccumLongStep129) { this->runTest({129, 8, 50211, 1}); } TYPED_TEST(LogProbKernelTest, AccumLongStep1023) { this->runTest({1023, 8, 5001, 1}); } TYPED_TEST(LogProbKernelTest, AccumLongStep4096) { this->runTest({4096, 8, 5001, 1}); } TYPED_TEST(LogProbKernelTest, BatchFirstSingleStep) { this->runBatchFirstTest({1, 32, 16, 1}); } TYPED_TEST(LogProbKernelTest, BatchFirstAccumLongStep129) { this->runBatchFirstTest({129, 8, 50211, 1}); } TYPED_TEST(LogProbKernelTest, BatchFirstAccumLongStep1023) { this->runBatchFirstTest({1023, 8, 5001, 1}); } TYPED_TEST(LogProbKernelTest, BatchFirstAccumLongStep4096) { this->runBatchFirstTest({4096, 8, 5001, 1}); }
04e1e13bd89a88aaa9e7abc7e7768f69966be8e0.cu
#include <assert.h> #include <math.h> #include <float.h> #include <stdexcept> #include <tuple> #include <vector> #include <sys/time.h> #include "src/fastertransformer/kernels/logprob_kernels.h" #include "src/fastertransformer/utils/allocator.h" #include "src/fastertransformer/utils/cuda_utils.h" #include "src/fastertransformer/utils/logger.h" #include "src/fastertransformer/utils/memory_utils.h" #include "tests/unittests/gtest_utils.h" using namespace fastertransformer; //////////////////////////////////////////////////////////////////////////////////// struct LogProbKernelTestParam { size_t max_input_length; size_t batch_size; size_t vocab_size; size_t beam_width; std::string toString() { return fmtstr("LogProbKernelTestParam[max_input_length=%ld, batch=%ld, vocab=%ld, beam_width=%ld]", max_input_length, batch_size, vocab_size, beam_width); } }; /////////////////////////////////// Unittests ////////////////////////////////////////// template<typename T> class LogProbKernelTest : public FtTestBase { protected: void computeCumLogProbs(float* cum_log_probs, float* log_probs, const T* logits, const int* input_ids, const int* input_lengths, const size_t max_input_length, const size_t batch_size, const size_t vocab_size, const size_t vocab_size_padded) { for (size_t step = 0; step < max_input_length; ++step) { for (size_t i = 0; i < batch_size; ++i) { if ((int)step == 0) { if (log_probs != nullptr) { log_probs[i] = 0.0f; } cum_log_probs[i] = 0.0f; } else if ((int)step < input_lengths[i]) { size_t step_offset = (step - 1) * batch_size * vocab_size_padded; const T* vec = logits + step_offset + i * vocab_size_padded; float max_logits = -FLT_MAX; for (size_t v = 0; v < vocab_size; ++v) { float val = static_cast<float>(vec[v]); if (val > max_logits) { max_logits = val; } } float sum = 0.0f; for (size_t v = 0; v < vocab_size; ++v) { sum += expf(static_cast<float>(vec[v]) - max_logits); } int token_id = input_ids[step * batch_size + i]; float log_prob = 
static_cast<float>(vec[token_id]) - max_logits - log(sum); if (log_probs != nullptr) { log_probs[step * batch_size + i] = log_prob; } cum_log_probs[i] += log_prob; } } } } void computeCumLogProbsBatchFirst(float* cum_log_probs, float* log_probs, const T* logits, const int* input_ids, const int* input_lengths, const size_t max_input_length, const size_t batch_size, const size_t vocab_size, const size_t vocab_size_padded) { for (size_t i = 0; i < batch_size; ++i) { size_t batch_offset = i * max_input_length * vocab_size_padded; for (size_t step = 0; step < max_input_length; ++step) { if ((int)step == 0) { if (log_probs != nullptr) { log_probs[i * max_input_length] = 0.0f; } cum_log_probs[i] = 0.0f; } else if ((int)step < input_lengths[i]) { const T* vec = logits + batch_offset + (step - 1) * vocab_size_padded; float max_logits = -FLT_MAX; for (size_t v = 0; v < vocab_size; ++v) { float val = static_cast<float>(vec[v]); if (val > max_logits) { max_logits = val; } } float sum = 0.0f; for (size_t v = 0; v < vocab_size; ++v) { sum += expf(static_cast<float>(vec[v]) - max_logits); } int token_id = input_ids[i * max_input_length + step]; float log_prob = static_cast<float>(vec[token_id]) - max_logits - log(sum); if (log_probs != nullptr) { log_probs[i * max_input_length + step] = log_prob; } cum_log_probs[i] += log_prob; } } } } public: void runTest(LogProbKernelTestParam param) { size_t max_input_length = param.max_input_length; size_t batchxbeam = param.batch_size * param.beam_width; size_t vocab_size = param.vocab_size; // Make multiple of 8 as GPT does. 
size_t vocab_size_padded = static_cast<size_t>(ceil(vocab_size / 8.f) * 8); // input values T* h_logits = new T[max_input_length * batchxbeam * vocab_size]; int* h_input_ids = new int[max_input_length * batchxbeam]; int* h_input_lengths = new int[batchxbeam]; // output buffers float* expected_cum_log_probs = new float[batchxbeam]; // initialize host buffers initRandom(h_logits, max_input_length * batchxbeam * vocab_size, -10.0f / vocab_size, -1.0f); initRandomInt(h_input_ids, max_input_length * batchxbeam, 0, vocab_size); initRandomInt(h_input_lengths, batchxbeam, 1, max_input_length + 1); memset(expected_cum_log_probs, 0, sizeof(float) * batchxbeam); // device buffers T* d_logits = reinterpret_cast<T*>(allocator->malloc(sizeof(T) * max_input_length * batchxbeam * vocab_size)); int *d_input_ids = reinterpret_cast<int*>(allocator->malloc(sizeof(int) * max_input_length * batchxbeam)); int *d_input_lengths = reinterpret_cast<int*>(allocator->malloc(sizeof(int) * batchxbeam)); float* d_cum_log_probs = reinterpret_cast<float*>(allocator->malloc(sizeof(float) * batchxbeam)); // initialize device buffers cudaH2Dcpy(d_logits, h_logits, max_input_length * batchxbeam * vocab_size); cudaH2Dcpy(d_input_ids, h_input_ids, max_input_length * batchxbeam); cudaH2Dcpy(d_input_lengths, h_input_lengths, batchxbeam); deviceFill(d_cum_log_probs, batchxbeam, 0.0f); size_t workspace_size = sizeof(float) * max_input_length * batchxbeam; void* workspace = allocator->malloc(workspace_size); invokeLogProbFromLogits(d_cum_log_probs, d_logits, d_input_ids, d_input_lengths, max_input_length, batchxbeam, vocab_size, vocab_size_padded, workspace, workspace_size, stream, false); computeCumLogProbs(expected_cum_log_probs, nullptr, h_logits, h_input_ids, h_input_lengths, max_input_length, batchxbeam, vocab_size, vocab_size_padded); bool passed = checkResult(param.toString(), d_cum_log_probs, expected_cum_log_probs, batchxbeam); EXPECT_TRUE(passed); FT_LOG_DEBUG("free host buffers"); delete[] 
expected_cum_log_probs; delete[] h_input_lengths; delete[] h_input_ids; delete[] h_logits; } void runBatchFirstTest(LogProbKernelTestParam param) { size_t max_input_length = param.max_input_length; size_t batchxbeam = param.batch_size * param.beam_width; size_t vocab_size = param.vocab_size; // Make multiple of 8 as GPT does. size_t vocab_size_padded = static_cast<size_t>(ceil(vocab_size / 8.f) * 8); // input values T* h_logits = new T[max_input_length * batchxbeam * vocab_size_padded]; int* h_input_ids = new int[max_input_length * batchxbeam]; int* h_input_lengths = new int[batchxbeam]; // output buffers float* expected_cum_log_probs = new float[batchxbeam]; // initialize host buffers initRandom(h_logits, max_input_length * batchxbeam * vocab_size_padded, -10.0f / vocab_size, -1.0f); initRandomInt(h_input_ids, max_input_length * batchxbeam, 0, vocab_size); initRandomInt(h_input_lengths, batchxbeam, 1, max_input_length + 1); memset(expected_cum_log_probs, 0, sizeof(float) * batchxbeam); // device buffers T* d_logits = reinterpret_cast<T*>(allocator->malloc(sizeof(T) * max_input_length * batchxbeam * vocab_size_padded)); int *d_input_ids = reinterpret_cast<int*>(allocator->malloc(sizeof(int) * max_input_length * batchxbeam)); int *d_input_lengths = reinterpret_cast<int*>(allocator->malloc(sizeof(int) * batchxbeam)); float* d_cum_log_probs = reinterpret_cast<float*>(allocator->malloc(sizeof(float) * batchxbeam)); // initialize device buffers cudaH2Dcpy(d_logits, h_logits, max_input_length * batchxbeam * vocab_size_padded); cudaH2Dcpy(d_input_ids, h_input_ids, max_input_length * batchxbeam); cudaH2Dcpy(d_input_lengths, h_input_lengths, batchxbeam); check_cuda_error(cudaMemset(d_cum_log_probs, 0, sizeof(float) * batchxbeam)); size_t workspace_size = sizeof(float) * max_input_length * batchxbeam; void* workspace = allocator->malloc(workspace_size); invokeLogProbFromLogits(d_cum_log_probs, d_logits, d_input_ids, d_input_lengths, max_input_length, batchxbeam, vocab_size, 
vocab_size_padded, workspace, workspace_size, stream, true); computeCumLogProbsBatchFirst(expected_cum_log_probs, nullptr, h_logits, h_input_ids, h_input_lengths, max_input_length, batchxbeam, vocab_size, vocab_size_padded); std::string tag = param.toString() + (std::is_same<T, float>::value ? " (fp32)" : " (fp16)"); bool passed = checkResult(tag.c_str(), d_cum_log_probs, expected_cum_log_probs, batchxbeam); EXPECT_TRUE(passed); delete[] expected_cum_log_probs; delete[] h_input_lengths; delete[] h_input_ids; delete[] h_logits; } }; TYPED_TEST_SUITE(LogProbKernelTest, FloatAndHalfTypes); TYPED_TEST(LogProbKernelTest, SingleStep) { this->runTest({1, 32, 16, 1}); } TYPED_TEST(LogProbKernelTest, AccumLongStep129) { this->runTest({129, 8, 50211, 1}); } TYPED_TEST(LogProbKernelTest, AccumLongStep1023) { this->runTest({1023, 8, 5001, 1}); } TYPED_TEST(LogProbKernelTest, AccumLongStep4096) { this->runTest({4096, 8, 5001, 1}); } TYPED_TEST(LogProbKernelTest, BatchFirstSingleStep) { this->runBatchFirstTest({1, 32, 16, 1}); } TYPED_TEST(LogProbKernelTest, BatchFirstAccumLongStep129) { this->runBatchFirstTest({129, 8, 50211, 1}); } TYPED_TEST(LogProbKernelTest, BatchFirstAccumLongStep1023) { this->runBatchFirstTest({1023, 8, 5001, 1}); } TYPED_TEST(LogProbKernelTest, BatchFirstAccumLongStep4096) { this->runBatchFirstTest({4096, 8, 5001, 1}); }
2be34f4859dfbeee848858c23661b27b162b468e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <hip/hip_runtime.h> #define LINE 100000 void readfile(int num[LINE]){ int temp; int i; FILE *fp; fp = fopen("number.txt", "r"); i = 0; if(fp == NULL){ printf("Error loading file!!\n"); exit(1); }else{ while(!feof(fp)){ fscanf(fp, "%d", &temp); num[i] = temp; i++; } } fclose(fp); } void printfile(int num[LINE]){ int i; FILE *fp = fopen("update.txt", "w"); for (i = 0; i < LINE; i++) fprintf(fp, "%d ", num[i]); fclose(fp); } void copyData(int num[LINE], int num1[LINE]){ int i; for(i = 0; i < LINE; i++) num1[i] = num[i]; } __global__ void even(int *dnum, int n){ int k = threadIdx.x + blockIdx.x * blockDim.x; int temp; k = k * 2; if(k <= n - 2){ if(dnum[k] > dnum[k + 1]){ temp = dnum[k]; dnum[k] = dnum[k + 1]; dnum[k + 1] = temp; } } } __global__ void odd(int *dnum, int n){ int k = threadIdx.x + blockIdx.x * blockDim.x; int temp; k = k * 2 + 1; if(k <= n - 2){ if(dnum[k] > dnum[k + 1]){ temp = dnum[k]; dnum[k] = dnum[k + 1]; dnum[k + 1] = temp; } } } void docuda(int *dnum, int threads){ int i, add; add = 0; if(LINE % threads != 0) add = 1; else add = 0; for(i = 0; i < LINE; i++){ hipLaunchKernelGGL(( even), dim3(LINE / threads + add), dim3(threads), 0, 0, dnum, LINE); hipLaunchKernelGGL(( odd), dim3(LINE / threads + add), dim3(threads), 0, 0, dnum, LINE); } } void cuda(int num[LINE], int num1[LINE]){ int threads, block, i, add; int *dnum; struct timeval tv; struct timezone tz; double start, end, time, time1, time2, average; start = 0; end = 0; time = 0; time1 = 0; time2 = 0; average = 0; threads = 2; block = 0; printf("Time execution for parallel bubble sort using CUDA based on threads per block\n"); printf("====================================================================================================\n"); printf(" Block size Number of threads 1st time 2nd time 3rd time average \n"); 
printf("====================================================================================================\n"); while (threads <= 1024){ if(LINE % threads != 0) add = 1; else add = 0; block = LINE / threads + add; for (i = 0; i < 3; i++){ copyData(num, num1); hipMalloc(&dnum, LINE*sizeof(int)); hipMemcpy(dnum, num, LINE*sizeof(int), hipMemcpyHostToDevice); gettimeofday(&tv, &tz); start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000; docuda(dnum, threads); gettimeofday(&tv, &tz); end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000; hipMemcpy(num, dnum, LINE*sizeof(int), hipMemcpyDeviceToHost); if (i == 0) time = end - start; else if (i == 1) time1 = end - start; else if (i == 2) time2 = end - start; } average = (time + time1 + time2) / 3; printf(" %i %i %fs %fs %fs %fs\n", block, threads, time, time1, time2, average); threads = threads * 2; } } int main(){ int num[LINE]; int num1[LINE]; printf("Getting data...\n"); readfile(num); printf("Sorting data...\n\n"); cuda(num, num1); printfile(num); printf("\nParallel bubble sort in CUDA sucessfully.\n"); return 0; }
2be34f4859dfbeee848858c23661b27b162b468e.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <cuda.h> #define LINE 100000 void readfile(int num[LINE]){ int temp; int i; FILE *fp; fp = fopen("number.txt", "r"); i = 0; if(fp == NULL){ printf("Error loading file!!\n"); exit(1); }else{ while(!feof(fp)){ fscanf(fp, "%d", &temp); num[i] = temp; i++; } } fclose(fp); } void printfile(int num[LINE]){ int i; FILE *fp = fopen("update.txt", "w"); for (i = 0; i < LINE; i++) fprintf(fp, "%d ", num[i]); fclose(fp); } void copyData(int num[LINE], int num1[LINE]){ int i; for(i = 0; i < LINE; i++) num1[i] = num[i]; } __global__ void even(int *dnum, int n){ int k = threadIdx.x + blockIdx.x * blockDim.x; int temp; k = k * 2; if(k <= n - 2){ if(dnum[k] > dnum[k + 1]){ temp = dnum[k]; dnum[k] = dnum[k + 1]; dnum[k + 1] = temp; } } } __global__ void odd(int *dnum, int n){ int k = threadIdx.x + blockIdx.x * blockDim.x; int temp; k = k * 2 + 1; if(k <= n - 2){ if(dnum[k] > dnum[k + 1]){ temp = dnum[k]; dnum[k] = dnum[k + 1]; dnum[k + 1] = temp; } } } void docuda(int *dnum, int threads){ int i, add; add = 0; if(LINE % threads != 0) add = 1; else add = 0; for(i = 0; i < LINE; i++){ even<<<LINE / threads + add, threads>>>(dnum, LINE); odd<<<LINE / threads + add, threads>>>(dnum, LINE); } } void cuda(int num[LINE], int num1[LINE]){ int threads, block, i, add; int *dnum; struct timeval tv; struct timezone tz; double start, end, time, time1, time2, average; start = 0; end = 0; time = 0; time1 = 0; time2 = 0; average = 0; threads = 2; block = 0; printf("Time execution for parallel bubble sort using CUDA based on threads per block\n"); printf("====================================================================================================\n"); printf(" Block size Number of threads 1st time 2nd time 3rd time average \n"); printf("====================================================================================================\n"); while (threads <= 1024){ if(LINE % threads != 0) add = 1; else add 
= 0; block = LINE / threads + add; for (i = 0; i < 3; i++){ copyData(num, num1); cudaMalloc(&dnum, LINE*sizeof(int)); cudaMemcpy(dnum, num, LINE*sizeof(int), cudaMemcpyHostToDevice); gettimeofday(&tv, &tz); start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000; docuda(dnum, threads); gettimeofday(&tv, &tz); end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000; cudaMemcpy(num, dnum, LINE*sizeof(int), cudaMemcpyDeviceToHost); if (i == 0) time = end - start; else if (i == 1) time1 = end - start; else if (i == 2) time2 = end - start; } average = (time + time1 + time2) / 3; printf(" %i %i %fs %fs %fs %fs\n", block, threads, time, time1, time2, average); threads = threads * 2; } } int main(){ int num[LINE]; int num1[LINE]; printf("Getting data...\n"); readfile(num); printf("Sorting data...\n\n"); cuda(num, num1); printfile(num); printf("\nParallel bubble sort in CUDA sucessfully.\n"); return 0; }
acae9923a43b90605194bdac9c9d4a05a564983b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <cutil.h> #include <sys/time.h> #include "radixsort.h" #include "random.hpp" #include "timer.h" #include "bsearch_cuda.h" int main(int argc, char *argv[]) { if (argc < 4) { fprintf(stderr, "usage: %s <D size> <Q size> <I/T Size>" "<seed> <device>\n", argv[0]); return 1; } CUDA_SAFE_CALL( hipSetDevice( atoi(argv[5] ) ) ); //CUDA_SAFE_CALL( hipFree(NULL) ); int D_size = atoi(argv[1]); int Q_size = atoi(argv[2]); int I_size = atoi(argv[3]); int T_size = I_size; int seed = atoi(argv[4]); hipError_t err; //{{{ gen Q and D RNG_rand48 D_r(seed); D_r.generate(D_size); unsigned int *D_d = (unsigned int *)D_r.get_random_numbers(); RNG_rand48 Q_r(seed); Q_r.generate(Q_size); unsigned int *Q_d = (unsigned int *)Q_r.get_random_numbers(); hipDeviceSynchronize(); err = hipGetLastError(); if(err != hipSuccess) fprintf(stderr, "rand errors: %s.\n", hipGetErrorString( err) ); //}}} //{{{ sort D start(); nvRadixSort::RadixSort sort_D_d(D_size, true); sort_D_d.sort((unsigned int*)D_d, 0, D_size, 32); hipDeviceSynchronize(); err = hipGetLastError(); if(err != hipSuccess) fprintf(stderr, "sort d: %s.\n", hipGetErrorString( err) ); stop(); unsigned long sort_d_time = report(); //}}} unsigned int *D_h = (unsigned int *)malloc( D_size * sizeof(unsigned int)); hipMemcpy(D_h, D_d, (D_size) * sizeof(unsigned int), hipMemcpyDeviceToHost); int block_size = 256; dim3 dimBlock(block_size); int grid_size = ( Q_size + block_size - 1) / (block_size * 1); dim3 dimGrid( grid_size ); //{{{ index int index_grid_size = ( I_size + block_size - 1) / (block_size * 1); dim3 index_dimGrid( index_grid_size ); unsigned int *I_d; hipMalloc((void **)&I_d, (I_size)*sizeof(unsigned int)); start(); hipLaunchKernelGGL(( gen_index) , dim3(index_dimGrid), dim3(dimBlock), 0, 0, D_d, D_size, I_d, I_size); hipDeviceSynchronize(); err = hipGetLastError(); if(err != hipSuccess) 
fprintf(stderr, "index: %s.\n", hipGetErrorString( err) ); stop(); unsigned long index_time = report(); unsigned int *I_h = (unsigned int *)malloc( I_size * sizeof(unsigned int)); hipMemcpy(I_h, I_d, (I_size) * sizeof(unsigned int), hipMemcpyDeviceToHost); hipFree(I_d); //}}} //{{{ tree int tree_grid_size = ( T_size + block_size - 1) / (block_size * 1); dim3 tree_dimGrid( tree_grid_size ); unsigned int *T_d; hipMalloc((void **)&T_d, (T_size)*sizeof(unsigned int)); start(); hipLaunchKernelGGL(( gen_tree) , dim3(tree_dimGrid), dim3(dimBlock), 0, 0, D_d, D_size, T_d, T_size); hipDeviceSynchronize(); err = hipGetLastError(); if(err != hipSuccess) fprintf(stderr, "tree: %s.\n", hipGetErrorString( err) ); stop(); unsigned long tree_time = report(); unsigned int *T_h = (unsigned int *)malloc( T_size * sizeof(unsigned int)); hipMemcpy(T_h, T_d, (T_size) * sizeof(unsigned int), hipMemcpyDeviceToHost); hipFree(T_d); //}}} int i; for (i = 0; i < I_size; i++) printf( "%d\t" "i:%u,%u\t" "t:%u,%u\n", i, //_i_to_I(i,I_size,D_size), //_i_to_T(i,T_size,D_size), I_h[i],D_h[ _i_to_I(i,I_size,D_size) ], T_h[i],D_h[ _i_to_T(i,T_size,D_size) ] ); return 0; }
acae9923a43b90605194bdac9c9d4a05a564983b.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cutil.h> #include <sys/time.h> #include "radixsort.h" #include "random.hpp" #include "timer.h" #include "bsearch_cuda.h" int main(int argc, char *argv[]) { if (argc < 4) { fprintf(stderr, "usage: %s <D size> <Q size> <I/T Size>" "<seed> <device>\n", argv[0]); return 1; } CUDA_SAFE_CALL( cudaSetDevice( atoi(argv[5] ) ) ); //CUDA_SAFE_CALL( cudaFree(NULL) ); int D_size = atoi(argv[1]); int Q_size = atoi(argv[2]); int I_size = atoi(argv[3]); int T_size = I_size; int seed = atoi(argv[4]); cudaError_t err; //{{{ gen Q and D RNG_rand48 D_r(seed); D_r.generate(D_size); unsigned int *D_d = (unsigned int *)D_r.get_random_numbers(); RNG_rand48 Q_r(seed); Q_r.generate(Q_size); unsigned int *Q_d = (unsigned int *)Q_r.get_random_numbers(); cudaThreadSynchronize(); err = cudaGetLastError(); if(err != cudaSuccess) fprintf(stderr, "rand errors: %s.\n", cudaGetErrorString( err) ); //}}} //{{{ sort D start(); nvRadixSort::RadixSort sort_D_d(D_size, true); sort_D_d.sort((unsigned int*)D_d, 0, D_size, 32); cudaThreadSynchronize(); err = cudaGetLastError(); if(err != cudaSuccess) fprintf(stderr, "sort d: %s.\n", cudaGetErrorString( err) ); stop(); unsigned long sort_d_time = report(); //}}} unsigned int *D_h = (unsigned int *)malloc( D_size * sizeof(unsigned int)); cudaMemcpy(D_h, D_d, (D_size) * sizeof(unsigned int), cudaMemcpyDeviceToHost); int block_size = 256; dim3 dimBlock(block_size); int grid_size = ( Q_size + block_size - 1) / (block_size * 1); dim3 dimGrid( grid_size ); //{{{ index int index_grid_size = ( I_size + block_size - 1) / (block_size * 1); dim3 index_dimGrid( index_grid_size ); unsigned int *I_d; cudaMalloc((void **)&I_d, (I_size)*sizeof(unsigned int)); start(); gen_index <<<index_dimGrid, dimBlock>>> ( D_d, D_size, I_d, I_size); cudaThreadSynchronize(); err = cudaGetLastError(); if(err != cudaSuccess) fprintf(stderr, "index: %s.\n", cudaGetErrorString( err) ); stop(); unsigned long index_time = 
report(); unsigned int *I_h = (unsigned int *)malloc( I_size * sizeof(unsigned int)); cudaMemcpy(I_h, I_d, (I_size) * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaFree(I_d); //}}} //{{{ tree int tree_grid_size = ( T_size + block_size - 1) / (block_size * 1); dim3 tree_dimGrid( tree_grid_size ); unsigned int *T_d; cudaMalloc((void **)&T_d, (T_size)*sizeof(unsigned int)); start(); gen_tree <<<tree_dimGrid, dimBlock>>> ( D_d, D_size, T_d, T_size); cudaThreadSynchronize(); err = cudaGetLastError(); if(err != cudaSuccess) fprintf(stderr, "tree: %s.\n", cudaGetErrorString( err) ); stop(); unsigned long tree_time = report(); unsigned int *T_h = (unsigned int *)malloc( T_size * sizeof(unsigned int)); cudaMemcpy(T_h, T_d, (T_size) * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaFree(T_d); //}}} int i; for (i = 0; i < I_size; i++) printf( "%d\t" "i:%u,%u\t" "t:%u,%u\n", i, //_i_to_I(i,I_size,D_size), //_i_to_T(i,T_size,D_size), I_h[i],D_h[ _i_to_I(i,I_size,D_size) ], T_h[i],D_h[ _i_to_T(i,T_size,D_size) ] ); return 0; }
7d95beafe3c406b76ea6eb5d0a3fd2d02c9bf99a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 */ #include "common_magma.h" #define PRECISION_s /* The version for fermi can be found in ssymv_fermi.cu */ /* TODO: generate ssymv_tesla.cu from ssymv_tesla.cu somehow. * Basically just strip out cuConj calls. */ #define symv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 /******************************************************************************* * Lower case, where n is multiple of block size (symv_bs) */ __global__ void ssymv_kernel_tesla_L_special( int n, float alpha, const float * __restrict__ A, int lda, const float * __restrict__ x, int incx, float beta, float * __restrict__ y, int incy, float * __restrict__ WC) { int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; // la must be at least half_thread_x*bank_shift = 32x33 = 1056; // quarter_thread_x*(thread_x+2) = 16*(64+2) = 1056 // (was thread_x+1 here, but thread_x+3 in ssymv_tesla.cu; +1 is insufficient) __shared__ float la [quarter_thread_x][thread_x+3]; /* Why +3? 
*/ __shared__ float buff [thread_x]; __shared__ float buff2[thread_x]; float tr[4]; float b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx) * incx; A += break_d * (lda+1); A += ty_* lda + tx_; if ( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_; ty = ty_; #pragma unroll for(int j=0; j < half_thread_x; j += 8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_ * 4 + 4); i++) { if ( i < tx_ ) { la[0][bank_shift * tx_ + i] = la[0][ i * bank_shift + tx_]; } else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_== 0 ) { res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_S_ZERO; } __syncthreads(); res = MAGMA_S_ZERO; A += half_thread_x + half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); float res2 = MAGMA_S_ZERO; if ( ty_== 1 ) { res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + 
la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_S_ZERO; } __syncthreads(); res = MAGMA_S_ZERO; A -= half_thread_x*lda; res_ = MAGMA_S_ZERO; #pragma unroll for(int j=0; j < half_thread_x; j += 8) tr[j/8] = A[ j * lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_ == 1 ) { res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_S_ZERO; } __syncthreads(); la[0][bank_shift*tx_+ty_] = res_; __syncthreads(); if ( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_S_ZERO; } A -= half_thread_x; __syncthreads(); tx = threadIdx.x; ty = threadIdx.y; if ( ty_ == 0 && ty == 0 ) res = res1; else if ( ty_ == 1 && ty == 0 ) res = res2; else { res = MAGMA_S_ZERO; } A -= ty_* lda; A -= tx_; A = A - lda * blkc * thread_x; x = x - blkc * thread_x * incx; A += 4 * ty* lda; A += tx; int wc_c = 0; int count = 0; tx_ = td % quarter_thread_x; ty_ = td / quarter_thread_x; WC -= tx; WC += tx_; #pragma unroll for(int j=0; j < 4; j++) { b[j] = buff[ty_*4+j]; } #pragma unroll for( int i=0; i < thread_x*blkc; i += thread_x ) { res_ = MAGMA_S_ZERO; count++; if ( ty == 0 ) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( int k=0; k < 4; k++ ) { #pragma unroll for(int j=0; j < 4; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff2[ quarter_thread_x*k + ty*4 + j]; la[j + ty*4][tx] = tr[j]; } 
__syncthreads(); res_ = MAGMA_S_ZERO; #pragma unroll for(int j=0; j < 4; j++) { res_ += la[tx_][ty_*4+j] * b[j]; } b[4+k] = res_; __syncthreads(); A += lda * quarter_thread_x; } #pragma unroll for(int k=0; k < 4; k++) { la[tx_][ty_+quarter_thread_x*k] = b[4+k]; } __syncthreads(); if ( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][ 0+k] + la[tx_][ 1+k] + la[tx_][ 2+k] + la[tx_][ 3+k] + la[tx_][ 4+k] + la[tx_][ 5+k] + la[tx_][ 6+k] + la[tx_][ 7+k] + la[tx_][ 8+k] + la[tx_][ 9+k] + la[tx_][10+k] + la[tx_][11+k] + la[tx_][12+k] + la[tx_][13+k] + la[tx_][14+k] + la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC += tx; WC -= tx_; la[ty][tx] = res; __syncthreads(); if ( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void ssymv_kernel_tesla_L_generic( int n, float alpha, const float * __restrict__ A, int lda, const float * __restrict__ x, int incx, float beta, float * __restrict__ y, int incy, float * __restrict__ WC, int m_mod_thread_x) { int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; __shared__ float la [quarter_thread_x][thread_x+3]; __shared__ float buff [thread_x]; __shared__ float buff2[thread_x]; float tr[4]; float b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx) * incx; A += break_d * (lda+1); A += lda * ty_; int trackA; if ( blkc == ( gridDim.x - 1 ) ) { if ( ty == 0 ) { if ( tx > m_mod_thread_x ) { buff[tx] = MAGMA_S_ZERO; } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA; } else { if ( ty == 0 ) { buff[tx] = x[0]; } trackA = tx_; A += trackA; } // Somehow merging these two if - else creates problem // 
It could be a potential bug -- from synchronization or from cuda or compiler if ( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j=0; j < half_thread_x; j += 8) { if ( ( ty_ + j ) > m_mod_thread_x ) { la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 9999, 0 ); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A -= trackA; } else { #pragma unroll for(int j=0; j < half_thread_x; j += 8) { la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_; ty = ty_; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_*4+4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][i*bank_shift+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_== 0 ) { res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_S_ZERO; } __syncthreads(); res = MAGMA_S_ZERO; if ( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A += trackA+half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) { if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 99999, 0 ); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A -= trackA+half_thread_x*lda; A += tx_; A += half_thread_x + half_thread_x*lda; } else { A += half_thread_x + half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) { la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll 
for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); float res2; res2 = MAGMA_S_ZERO; if ( ty_== 1 ) { res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_S_ZERO; } __syncthreads(); res = MAGMA_S_ZERO; res_ = MAGMA_S_ZERO; A -= half_thread_x*lda; if ( blkc == ( gridDim.x - 1 ) ) { A -= tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA; #pragma unroll for(int j=0; j < half_thread_x; j += 8) if ( ( ty_ + j ) > m_mod_thread_x ) { tr[j/8] = MAGMA_S_MAKE( 99999, 0 ); } else tr[j/8] = A[ j * lda]; A -= trackA; A += tx_; } else { #pragma unroll for(int j=0; j < half_thread_x; j += 8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_ == 1 ) { res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_S_ZERO; } __syncthreads(); la[0][bank_shift*tx_+ty_] = res_; __syncthreads(); if ( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_S_ZERO; } A -= half_thread_x; __syncthreads(); tx = threadIdx.x; ty = threadIdx.y; if ( ty_ == 0 && ty == 0 ) res = res1; else if 
( ty_ == 1 && ty == 0 ) res = res2; else { res = MAGMA_S_ZERO; } A -= ty_* lda; A -= tx_; A = A - lda*break_d; x = x - break_d*incx; A += 4 * ty* lda; if ( blkc == ( gridDim.x - 1 ) ) { if ( tx <= m_mod_thread_x ) A += tx; else A += m_mod_thread_x; } else{ A += tx; } int wc_c = 0; int count = 0; tx_ = td % quarter_thread_x; ty_ = td / quarter_thread_x; WC -= tx; WC += tx_; #pragma unroll for(int j=0; j < 4; j++) b[j] = buff[ty_*4+j]; #pragma unroll for( int i=0; i < break_d; i += thread_x ) { res_ = MAGMA_S_ZERO; count++; if ( ty == 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0; k < 4; k++ ) { #pragma unroll for(int j=0; j < 4; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); res_ = MAGMA_S_ZERO; #pragma unroll for(int j=0; j < 4; j++) res_ += la[tx_][ty_*4+j]* b[j]; b[4+k] = res_; __syncthreads(); A += lda* quarter_thread_x; } #pragma unroll for(int k=0; k < 4; k++) { la[tx_][ty_+quarter_thread_x*k] = b[4+k]; } __syncthreads(); if ( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][ 0+k] + la[tx_][ 1+k] + la[tx_][ 2+k] + la[tx_][ 3+k] + la[tx_][ 4+k] + la[tx_][ 5+k] + la[tx_][ 6+k] + la[tx_][ 7+k] + la[tx_][ 8+k] + la[tx_][ 9+k] + la[tx_][10+k] + la[tx_][11+k] + la[tx_][12+k] + la[tx_][13+k] + la[tx_][14+k] + la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC += tx; WC -= tx_; la[ty][tx] = res; __syncthreads(); if ( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx]; WC[0+lda*(blkc)] = res; } } __global__ void ssymv_kernel_tesla_L_update( int n, float alpha, const float * __restrict__ A, int lda, const float * __restrict__ x, int incx, float beta, float * __restrict__ y, int incy, float * __restrict__ WC ) { int i; int tx = threadIdx.x; int ind = blockIdx.x * thread_x + tx; float Ca; Ca = MAGMA_S_ZERO; WC += ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i < n; i += thread_x) { 
Ca += WC[0]; WC += thread_x; } if ( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca; } extern "C" void magmablas_ssymv_tesla_L( magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *dwork) { magma_int_t blocks = (n - 1)/symv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); /* * If matrix size is multiple of symv_bs, we use a specific code. * otherwise, we call the generic case. */ if ( n % symv_bs == 0 ) { hipLaunchKernelGGL(( ssymv_kernel_tesla_L_special), dim3(grid), dim3(threads), 0, magma_stream , n, alpha, A, lda, x, incx, beta, y, incy, dwork); } else{ magma_int_t m_mod_thread_x = (n % symv_bs) - 1; hipLaunchKernelGGL(( ssymv_kernel_tesla_L_generic), dim3(grid), dim3(threads), 0, magma_stream , n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x); } dim3 threads_u(symv_bs, 1, 1); hipLaunchKernelGGL(( ssymv_kernel_tesla_L_update), dim3(grid), dim3(threads_u), 0, magma_stream , n, alpha, A, lda, x, incx, beta, y, incy, dwork); } /************************************************************************* Purpose ======= magmablas_ssymv_work performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. the interface of magmablas_ssymv_work is different from magmablas_ssymv in the last argument dwork As magma implements ssymv through two steps: 1) perform the multiplication in each thread blocks and put the intermediate value in a space of device memory which we call working space. dwork is the working space 2) sum the intermediate values and store the final result in y. 
The size of dwork is lda * ceil(n/thread_x) where thread_x = 64 magamblasw_ssymv requires users to explicitly a working space, while magmablas_ssymv is a wrapper routine of magmabalsw_ssymv allocating the working space inside the routine and provides the same interface with cublas. If users need to call ssymv frequently, we suggest to use magmablas_ssymv_work instead of magmablas_ssymv. As the overhead of allocating and free in device memory in magmablas_ssymv would hurt performance. Our tests show that this penalty is about 10Gflop/s when matrix size is around 10000. */ extern "C" magma_int_t magmablas_ssymv_tesla_work( char uplo, magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *dwork, magma_int_t lwork) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ /* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else hipblasSsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy); #endif } else { magmablas_ssymv_tesla_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork); } return MAGMA_SUCCESS; } /************************************************************************* Purpose ======= magmablas_ssymv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. 
Arguments ========== UPLO CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA REAL*16. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A REAL*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X REAL*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA REAL*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y REAL*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. 
On exit, Y is overwritten by the updated vector y. INCY INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. */ extern "C" magma_int_t magmablas_ssymv_tesla( char uplo, magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ /* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else hipblasSsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy); #endif } else { float *dwork; magma_int_t blocks = (n - 1) / thread_x + 1; magma_int_t lwork = lda * (blocks + 1); // TODO deal with error magma_smalloc( &dwork, lwork ); magmablas_ssymv_tesla_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork); magma_free( dwork ); } return MAGMA_SUCCESS; }
7d95beafe3c406b76ea6eb5d0a3fd2d02c9bf99a.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 */ #include "common_magma.h" #define PRECISION_s /* The version for fermi can be found in ssymv_fermi.cu */ /* TODO: generate ssymv_tesla.cu from ssymv_tesla.cu somehow. * Basically just strip out cuConj calls. */ #define symv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 /******************************************************************************* * Lower case, where n is multiple of block size (symv_bs) */ __global__ void ssymv_kernel_tesla_L_special( int n, float alpha, const float * __restrict__ A, int lda, const float * __restrict__ x, int incx, float beta, float * __restrict__ y, int incy, float * __restrict__ WC) { int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; // la must be at least half_thread_x*bank_shift = 32x33 = 1056; // quarter_thread_x*(thread_x+2) = 16*(64+2) = 1056 // (was thread_x+1 here, but thread_x+3 in ssymv_tesla.cu; +1 is insufficient) __shared__ float la [quarter_thread_x][thread_x+3]; /* Why +3? 
*/ __shared__ float buff [thread_x]; __shared__ float buff2[thread_x]; float tr[4]; float b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx) * incx; A += break_d * (lda+1); A += ty_* lda + tx_; if ( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_; ty = ty_; #pragma unroll for(int j=0; j < half_thread_x; j += 8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_ * 4 + 4); i++) { if ( i < tx_ ) { la[0][bank_shift * tx_ + i] = la[0][ i * bank_shift + tx_]; } else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_== 0 ) { res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_S_ZERO; } __syncthreads(); res = MAGMA_S_ZERO; A += half_thread_x + half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); float res2 = MAGMA_S_ZERO; if ( ty_== 1 ) { res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + 
la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_S_ZERO; } __syncthreads(); res = MAGMA_S_ZERO; A -= half_thread_x*lda; res_ = MAGMA_S_ZERO; #pragma unroll for(int j=0; j < half_thread_x; j += 8) tr[j/8] = A[ j * lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_ == 1 ) { res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_S_ZERO; } __syncthreads(); la[0][bank_shift*tx_+ty_] = res_; __syncthreads(); if ( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_S_ZERO; } A -= half_thread_x; __syncthreads(); tx = threadIdx.x; ty = threadIdx.y; if ( ty_ == 0 && ty == 0 ) res = res1; else if ( ty_ == 1 && ty == 0 ) res = res2; else { res = MAGMA_S_ZERO; } A -= ty_* lda; A -= tx_; A = A - lda * blkc * thread_x; x = x - blkc * thread_x * incx; A += 4 * ty* lda; A += tx; int wc_c = 0; int count = 0; tx_ = td % quarter_thread_x; ty_ = td / quarter_thread_x; WC -= tx; WC += tx_; #pragma unroll for(int j=0; j < 4; j++) { b[j] = buff[ty_*4+j]; } #pragma unroll for( int i=0; i < thread_x*blkc; i += thread_x ) { res_ = MAGMA_S_ZERO; count++; if ( ty == 0 ) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( int k=0; k < 4; k++ ) { #pragma unroll for(int j=0; j < 4; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff2[ quarter_thread_x*k + ty*4 + j]; la[j + ty*4][tx] = tr[j]; } 
__syncthreads(); res_ = MAGMA_S_ZERO; #pragma unroll for(int j=0; j < 4; j++) { res_ += la[tx_][ty_*4+j] * b[j]; } b[4+k] = res_; __syncthreads(); A += lda * quarter_thread_x; } #pragma unroll for(int k=0; k < 4; k++) { la[tx_][ty_+quarter_thread_x*k] = b[4+k]; } __syncthreads(); if ( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][ 0+k] + la[tx_][ 1+k] + la[tx_][ 2+k] + la[tx_][ 3+k] + la[tx_][ 4+k] + la[tx_][ 5+k] + la[tx_][ 6+k] + la[tx_][ 7+k] + la[tx_][ 8+k] + la[tx_][ 9+k] + la[tx_][10+k] + la[tx_][11+k] + la[tx_][12+k] + la[tx_][13+k] + la[tx_][14+k] + la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC += tx; WC -= tx_; la[ty][tx] = res; __syncthreads(); if ( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void ssymv_kernel_tesla_L_generic( int n, float alpha, const float * __restrict__ A, int lda, const float * __restrict__ x, int incx, float beta, float * __restrict__ y, int incy, float * __restrict__ WC, int m_mod_thread_x) { int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; __shared__ float la [quarter_thread_x][thread_x+3]; __shared__ float buff [thread_x]; __shared__ float buff2[thread_x]; float tr[4]; float b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx) * incx; A += break_d * (lda+1); A += lda * ty_; int trackA; if ( blkc == ( gridDim.x - 1 ) ) { if ( ty == 0 ) { if ( tx > m_mod_thread_x ) { buff[tx] = MAGMA_S_ZERO; } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA; } else { if ( ty == 0 ) { buff[tx] = x[0]; } trackA = tx_; A += trackA; } // Somehow merging these two if - else creates problem // 
It could be a potential bug -- from synchronization or from cuda or compiler if ( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j=0; j < half_thread_x; j += 8) { if ( ( ty_ + j ) > m_mod_thread_x ) { la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 9999, 0 ); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A -= trackA; } else { #pragma unroll for(int j=0; j < half_thread_x; j += 8) { la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_; ty = ty_; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_*4+4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][i*bank_shift+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_== 0 ) { res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_S_ZERO; } __syncthreads(); res = MAGMA_S_ZERO; if ( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A += trackA+half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) { if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { la[0][bank_shift*(ty_+j)+tx_] = MAGMA_S_MAKE( 99999, 0 ); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A -= trackA+half_thread_x*lda; A += tx_; A += half_thread_x + half_thread_x*lda; } else { A += half_thread_x + half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) { la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll 
for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); float res2; res2 = MAGMA_S_ZERO; if ( ty_== 1 ) { res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_S_ZERO; } __syncthreads(); res = MAGMA_S_ZERO; res_ = MAGMA_S_ZERO; A -= half_thread_x*lda; if ( blkc == ( gridDim.x - 1 ) ) { A -= tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA; #pragma unroll for(int j=0; j < half_thread_x; j += 8) if ( ( ty_ + j ) > m_mod_thread_x ) { tr[j/8] = MAGMA_S_MAKE( 99999, 0 ); } else tr[j/8] = A[ j * lda]; A -= trackA; A += tx_; } else { #pragma unroll for(int j=0; j < half_thread_x; j += 8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_ == 1 ) { res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_S_ZERO; } __syncthreads(); la[0][bank_shift*tx_+ty_] = res_; __syncthreads(); if ( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_S_ZERO; } A -= half_thread_x; __syncthreads(); tx = threadIdx.x; ty = threadIdx.y; if ( ty_ == 0 && ty == 0 ) res = res1; else if 
( ty_ == 1 && ty == 0 ) res = res2; else { res = MAGMA_S_ZERO; } A -= ty_* lda; A -= tx_; A = A - lda*break_d; x = x - break_d*incx; A += 4 * ty* lda; if ( blkc == ( gridDim.x - 1 ) ) { if ( tx <= m_mod_thread_x ) A += tx; else A += m_mod_thread_x; } else{ A += tx; } int wc_c = 0; int count = 0; tx_ = td % quarter_thread_x; ty_ = td / quarter_thread_x; WC -= tx; WC += tx_; #pragma unroll for(int j=0; j < 4; j++) b[j] = buff[ty_*4+j]; #pragma unroll for( int i=0; i < break_d; i += thread_x ) { res_ = MAGMA_S_ZERO; count++; if ( ty == 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0; k < 4; k++ ) { #pragma unroll for(int j=0; j < 4; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); res_ = MAGMA_S_ZERO; #pragma unroll for(int j=0; j < 4; j++) res_ += la[tx_][ty_*4+j]* b[j]; b[4+k] = res_; __syncthreads(); A += lda* quarter_thread_x; } #pragma unroll for(int k=0; k < 4; k++) { la[tx_][ty_+quarter_thread_x*k] = b[4+k]; } __syncthreads(); if ( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][ 0+k] + la[tx_][ 1+k] + la[tx_][ 2+k] + la[tx_][ 3+k] + la[tx_][ 4+k] + la[tx_][ 5+k] + la[tx_][ 6+k] + la[tx_][ 7+k] + la[tx_][ 8+k] + la[tx_][ 9+k] + la[tx_][10+k] + la[tx_][11+k] + la[tx_][12+k] + la[tx_][13+k] + la[tx_][14+k] + la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC += tx; WC -= tx_; la[ty][tx] = res; __syncthreads(); if ( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx]; WC[0+lda*(blkc)] = res; } } __global__ void ssymv_kernel_tesla_L_update( int n, float alpha, const float * __restrict__ A, int lda, const float * __restrict__ x, int incx, float beta, float * __restrict__ y, int incy, float * __restrict__ WC ) { int i; int tx = threadIdx.x; int ind = blockIdx.x * thread_x + tx; float Ca; Ca = MAGMA_S_ZERO; WC += ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i < n; i += thread_x) { 
Ca += WC[0]; WC += thread_x; } if ( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca; } extern "C" void magmablas_ssymv_tesla_L( magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *dwork) { magma_int_t blocks = (n - 1)/symv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); /* * If matrix size is multiple of symv_bs, we use a specific code. * otherwise, we call the generic case. */ if ( n % symv_bs == 0 ) { ssymv_kernel_tesla_L_special<<< grid, threads, 0, magma_stream >>> (n, alpha, A, lda, x, incx, beta, y, incy, dwork); } else{ magma_int_t m_mod_thread_x = (n % symv_bs) - 1; ssymv_kernel_tesla_L_generic<<< grid, threads, 0, magma_stream >>> (n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x); } dim3 threads_u(symv_bs, 1, 1); ssymv_kernel_tesla_L_update<<< grid, threads_u, 0, magma_stream >>> (n, alpha, A, lda, x, incx, beta, y, incy, dwork); } /************************************************************************* Purpose ======= magmablas_ssymv_work performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. the interface of magmablas_ssymv_work is different from magmablas_ssymv in the last argument dwork As magma implements ssymv through two steps: 1) perform the multiplication in each thread blocks and put the intermediate value in a space of device memory which we call working space. dwork is the working space 2) sum the intermediate values and store the final result in y. The size of dwork is lda * ceil(n/thread_x) where thread_x = 64 magamblasw_ssymv requires users to explicitly a working space, while magmablas_ssymv is a wrapper routine of magmabalsw_ssymv allocating the working space inside the routine and provides the same interface with cublas. 
If users need to call ssymv frequently, we suggest to use magmablas_ssymv_work instead of magmablas_ssymv. As the overhead of allocating and free in device memory in magmablas_ssymv would hurt performance. Our tests show that this penalty is about 10Gflop/s when matrix size is around 10000. */ extern "C" magma_int_t magmablas_ssymv_tesla_work( char uplo, magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *dwork, magma_int_t lwork) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ /* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else cublasSsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy); #endif } else { magmablas_ssymv_tesla_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork); } return MAGMA_SUCCESS; } /************************************************************************* Purpose ======= magmablas_ssymv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ========== UPLO CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. 
UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA REAL*16. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A REAL*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X REAL*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA REAL*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y REAL*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. 
*/ extern "C" magma_int_t magmablas_ssymv_tesla( char uplo, magma_int_t n, float alpha, const float *A, magma_int_t lda, const float *x, magma_int_t incx, float beta, float *y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ /* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else cublasSsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy); #endif } else { float *dwork; magma_int_t blocks = (n - 1) / thread_x + 1; magma_int_t lwork = lda * (blocks + 1); // TODO deal with error magma_smalloc( &dwork, lwork ); magmablas_ssymv_tesla_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork); magma_free( dwork ); } return MAGMA_SUCCESS; }
2a4215542d0828005c0afde7a90bc7741ed86c06.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o cuda_crack.cu cuda_crack ./cuda_crack Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. *****************************************************************************/ __device__ int is_a_match(char *attempt) { char plain_password[] = "KB"; char *a = attempt; char *p = plain_password; while(*a == *p) { if(*a == '\0') { return 1; } a++; p++; } return 0; } /**************************************************************************** The kernel function assume that there will be only one thread and uses nested loops to generate all possible passwords and test whether they match the hidden password. 
*****************************************************************************/ __global__ void kernel() { char password[3]; password[2] = '\0'; password[0] ='A' + threadIdx.x; password[1] ='A' + blockIdx.x; if(is_a_match(password)) { printf("password found: %s\n", password); } else { } } int main() { hipLaunchKernelGGL(( kernel) , dim3(26), dim3(26), 0, 0, ); hipDeviceSynchronize(); return 0; }
2a4215542d0828005c0afde7a90bc7741ed86c06.cu
#include <stdio.h> #include <cuda_runtime_api.h> /**************************************************************************** This program gives an example of a poor way to implement a password cracker in CUDA C. It is poor because it acheives this with just one thread, which is obviously not good given the scale of parallelism available to CUDA programs. The intentions of this program are: 1) Demonstrate the use of __device__ and __global__ functions 2) Enable a simulation of password cracking in the absence of library with equivalent functionality to libcrypt. The password to be found is hardcoded into a function called is_a_match. Compile and run with: nvcc -o cuda_crack.cu cuda_crack ./cuda_crack Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ /**************************************************************************** This function returns 1 if the attempt at cracking the password is identical to the plain text password string stored in the program. Otherwise,it returns 0. *****************************************************************************/ __device__ int is_a_match(char *attempt) { char plain_password[] = "KB"; char *a = attempt; char *p = plain_password; while(*a == *p) { if(*a == '\0') { return 1; } a++; p++; } return 0; } /**************************************************************************** The kernel function assume that there will be only one thread and uses nested loops to generate all possible passwords and test whether they match the hidden password. *****************************************************************************/ __global__ void kernel() { char password[3]; password[2] = '\0'; password[0] ='A' + threadIdx.x; password[1] ='A' + blockIdx.x; if(is_a_match(password)) { printf("password found: %s\n", password); } else { } } int main() { kernel <<<26, 26>>>(); cudaThreadSynchronize(); return 0; }
2931e67971bd0e1eb8f71b4ba4dfa1499f06d4a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b, bool iou_flag) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); if (iou_flag) { return interS / (Sa + Sb - interS); } else { return interS / min(Sa, Sb); } } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask, bool iou_flag) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const 
float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5, iou_flag) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh, bool iou_flag) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev, iou_flag); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; 
i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
2931e67971bd0e1eb8f71b4ba4dfa1499f06d4a7.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b, bool iou_flag) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); if (iou_flag) { return interS / (Sa + Sb - interS); } else { return interS / min(Sa, Sb); } } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask, bool iou_flag) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int 
start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5, iou_flag) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 5 tensor at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh, bool iou_flag) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev, iou_flag); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { 
keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
2a786663fbec076883b543e450d5073b24ae5cd6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "util.hpp" // TODO CUDA kernel implementing axpy // y = y + alpha*x template<typename T> __global__ void axpy(int n, T alpha, const T* x, T* y){ int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < n) y[i] = y[i] + alpha * x[i]; } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 16); size_t n = 1 << pow; auto size_in_bytes = n * sizeof(double); hipInit(0); std::cout << "memcopy and daxpy test of size " << n << "\n"; double* x_device = malloc_device<double>(n); double* y_device = malloc_device<double>(n); double* x_host = malloc_host<double>(n, 1.5); double* y_host = malloc_host<double>(n, 3.0); double* y = malloc_host<double>(n, 0.0); // copy to device auto start = get_time(); copy_to_device<double>(x_host, x_device, n); copy_to_device<double>(y_host, y_device, n); auto time_H2D = get_time() - start; // TODO calculate grid dimensions // IGNORE for the first kernel writing exercise int block_dim = 64; int grid_dim = (n + block_dim -1) / block_dim; // synchronize the host and device so that the timings are accurate hipDeviceSynchronize(); start = get_time(); // TODO launch kernel (alpha=2.0) // round up hipLaunchKernelGGL(( axpy), dim3(grid_dim), dim3(block_dim), 0, 0, n, 2.0, x_device, y_device); hipDeviceSynchronize(); auto time_axpy = get_time() - start; // check for error in last kernel call cuda_check_last_kernel("axpy kernel"); // copy result back to host start = get_time(); copy_to_host<double>(y_device, y, n); auto time_D2H = get_time() - start; std::cout << "-------\ntimings\n-------\n"; std::cout << "H2D: " << time_H2D << " s\n"; std::cout << "D2H: " << time_D2H << " s\n"; std::cout << "axpy: " << time_axpy << " s\n"; std::cout << std::endl; std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s\n"; std::cout << std::endl; std::cout << "-------\nbandwidth\n-------\n"; auto H2D_BW = size_in_bytes/1e6*2 / time_H2D; 
auto D2H_BW = size_in_bytes/1e6 / time_D2H; std::cout << "H2D BW: " << H2D_BW << " MB/s\n"; std::cout << "D2H BW: " << D2H_BW << " MB/s\n"; // check for errors auto errors = 0; for(auto i=0; i<n; ++i) { if(::fabs(6.-y[i])>1e-15) { ++errors; } } std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n"; hipFree(x_device); hipFree(y_device); free(x_host); free(y_host); free(y); return 0; }
2a786663fbec076883b543e450d5073b24ae5cd6.cu
#include <iostream> #include <cuda.h> #include "util.hpp" // TODO CUDA kernel implementing axpy // y = y + alpha*x template<typename T> __global__ void axpy(int n, T alpha, const T* x, T* y){ int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < n) y[i] = y[i] + alpha * x[i]; } int main(int argc, char** argv) { size_t pow = read_arg(argc, argv, 1, 16); size_t n = 1 << pow; auto size_in_bytes = n * sizeof(double); cuInit(0); std::cout << "memcopy and daxpy test of size " << n << "\n"; double* x_device = malloc_device<double>(n); double* y_device = malloc_device<double>(n); double* x_host = malloc_host<double>(n, 1.5); double* y_host = malloc_host<double>(n, 3.0); double* y = malloc_host<double>(n, 0.0); // copy to device auto start = get_time(); copy_to_device<double>(x_host, x_device, n); copy_to_device<double>(y_host, y_device, n); auto time_H2D = get_time() - start; // TODO calculate grid dimensions // IGNORE for the first kernel writing exercise int block_dim = 64; int grid_dim = (n + block_dim -1) / block_dim; // synchronize the host and device so that the timings are accurate cudaDeviceSynchronize(); start = get_time(); // TODO launch kernel (alpha=2.0) // round up axpy<<<grid_dim, block_dim>>>(n, 2.0, x_device, y_device); cudaDeviceSynchronize(); auto time_axpy = get_time() - start; // check for error in last kernel call cuda_check_last_kernel("axpy kernel"); // copy result back to host start = get_time(); copy_to_host<double>(y_device, y, n); auto time_D2H = get_time() - start; std::cout << "-------\ntimings\n-------\n"; std::cout << "H2D: " << time_H2D << " s\n"; std::cout << "D2H: " << time_D2H << " s\n"; std::cout << "axpy: " << time_axpy << " s\n"; std::cout << std::endl; std::cout << "total: " << time_axpy+time_H2D+time_D2H << " s\n"; std::cout << std::endl; std::cout << "-------\nbandwidth\n-------\n"; auto H2D_BW = size_in_bytes/1e6*2 / time_H2D; auto D2H_BW = size_in_bytes/1e6 / time_D2H; std::cout << "H2D BW: " << H2D_BW << " MB/s\n"; std::cout << 
"D2H BW: " << D2H_BW << " MB/s\n"; // check for errors auto errors = 0; for(auto i=0; i<n; ++i) { if(std::fabs(6.-y[i])>1e-15) { ++errors; } } std::cout << (errors>0 ? "failed" : "passed") << " with " << errors << " errors\n"; cudaFree(x_device); cudaFree(y_device); free(x_host); free(y_host); free(y); return 0; }
f4ad7d9dbcbf689dc01343a4376d0d58bfcb77de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/convolution3d/forward/inplace_matmul_impl.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./inplace_matmul_impl.cuh" #include "src/cuda/utils.cuh" #include <iostream> #include <stdio.h> using namespace megdnn; using namespace cuda; namespace { struct BufferFetcherTexture { hipTextureObject_t tex; __device__ __forceinline__ float get(uint32_t offset) { return tex1Dfetch<float>(tex, offset); } }; struct BufferFetcherRaw { const float *ptr; __device__ __forceinline__ float get(uint32_t offset) { return ptr[offset]; } }; struct BufferFetcherTextureHost { bool init_succ; BufferFetcherTexture val; BufferFetcherTextureHost(float *p, const size_t n); ~BufferFetcherTextureHost() { reset(); } void reset() { if (init_succ) { cuda_check(hipDestroyTextureObject(val.tex)); init_succ = false; } } }; BufferFetcherTextureHost::BufferFetcherTextureHost(float *p, const size_t n) { init_succ = false; hipTextureObject_t tex_obj; hipResourceDesc res_desc; memset(&res_desc, 0, sizeof(hipResourceDesc)); res_desc.resType = hipResourceTypeLinear; res_desc.res.linear.devPtr = static_cast<void *>(p); res_desc.res.linear.sizeInBytes = n*sizeof(float); res_desc.res.linear.desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipTextureDesc tex_desc; memset(&tex_desc, 0, sizeof(hipTextureDesc)); if (hipCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL) == hipSuccess) { val.tex = tex_obj; init_succ = true; } else { hipGetLastError(); // reset error } } template<class BufferFetcher> struct KernelPtr { typedef void(*type)(BufferFetcher, BufferFetcher, 
float*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); }; //! 1 -> 0xffffffff, 0 -> 0x00000000 __device__ __forceinline__ uint32_t bool_as_mask(uint32_t cond) { return (!cond) - 1u; } union FloatAndU32 { float f; uint32_t u; }; //! \p mask must be either all 1 or 0 bits template<class BufferFetcher> __device__ __forceinline__ float visit_with_mask( BufferFetcher buf, uint32_t offset, uint32_t mask) { FloatAndU32 f; f.f = buf.get(offset & mask); f.u &= mask; return f.f; } template <uint32_t BY, uint32_t BX, bool is_xcorr, class BufferFetcher> __global__ void conv_kernel(BufferFetcher src, BufferFetcher filter, float *dst, const uint32_t INP_BS, const uint32_t OUT_BS, const uint32_t IC, const uint32_t ID, const uint32_t IH, const uint32_t IW, const uint32_t OC, const uint32_t OD, const uint32_t OH, const uint32_t OW, const uint32_t FD, const uint32_t FH, const uint32_t FW, const uint32_t SD, const uint32_t SH, const uint32_t SW, const uint32_t PD, const uint32_t PH, const uint32_t PW, const uint32_t DD, const uint32_t DH, const uint32_t DW) { const uint32_t BM = BY < BX ? 
BY : BX; // BY*BX == 256 // (OC) * (IC*FD*FH*FW) * (OD*OH*OW) const uint32_t n = blockIdx.z; const uint32_t tidx = threadIdx.x; const uint32_t tidy = threadIdx.y; const uint32_t posx = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t posy = blockIdx.y * blockDim.y + threadIdx.y; const uint32_t posx2 = posx<<2; const uint32_t posy2 = posy<<2; const uint32_t heightA = OC; const uint32_t widthA = IC*FD*FH*FW; const uint32_t heightB = widthA; const uint32_t widthB = OD*OH*OW; const uint32_t od0 = (posx2+0) / OW / OH * SD; const uint32_t oh0 = (posx2+0) / OW % OH * SH; const uint32_t ow0 = (posx2+0) % OW * SW; const uint32_t op0 = od0 * IH * IW + oh0 * IW + ow0; const uint32_t od1 = (posx2+1) / OW / OH * SD; const uint32_t oh1 = (posx2+1) / OW % OH * SH; const uint32_t ow1 = (posx2+1) % OW * SW; const uint32_t op1 = od1 * IH * IW + oh1 * IW + ow1; const uint32_t od2 = (posx2+2) / OW / OH * SD; const uint32_t oh2 = (posx2+2) / OW % OH * SH; const uint32_t ow2 = (posx2+2) % OW * SW; const uint32_t op2 = od2 * IH * IW + oh2 * IW + ow2; const uint32_t od3 = (posx2+3) / OW / OH * SD; const uint32_t oh3 = (posx2+3) / OW % OH * SH; const uint32_t ow3 = (posx2+3) % OW * SW; const uint32_t op3 = od3 * IH * IW + oh3 * IW + ow3; const uint32_t FP = FD*FH*FW; // OC % (BLOCK*4) == 0 // IC*FD*FH*FW % BLOCK == 0 // OD*OH*OW % (BLOCK*4) == 0 __shared__ float4 localA[BY][BM]; __shared__ float4 localB[BM][BX]; uint32_t i = 0u; uint32_t offsetA = posy2 * widthA + tidx; uint32_t offsetB = n*INP_BS - PD*IH*IW - PH*IW - PW; float4 sum0 = {0.0f, 0.0f, 0.0f, 0.0f}, sum1 = {0.0f, 0.0f, 0.0f, 0.0f}, sum2 = {0.0f, 0.0f, 0.0f, 0.0f}, sum3 = {0.0f, 0.0f, 0.0f, 0.0f}; uint32_t fd = tidy / FW / FH % FD; uint32_t fh = tidy / FW % FH; uint32_t fw = tidy % FW; uint32_t ic = tidy / (FD*FH*FW); uint32_t icm = tidy % (FD*FH*FW); const uint32_t fds = BM / FW / FH % FD; const uint32_t fhs = BM / FW % FH; const uint32_t fws = BM % FW; const uint32_t ics = BM / (FD*FH*FW); const uint32_t icms = BM % 
(FD*FH*FW); for (; i < widthA; i += BM, offsetA += BM) { // load localA if (tidx < BM) { localA[tidy][tidx].x = filter.get(offsetA + 0*widthA); localA[tidy][tidx].y = filter.get(offsetA + 1*widthA); localA[tidy][tidx].z = filter.get(offsetA + 2*widthA); localA[tidy][tidx].w = filter.get(offsetA + 3*widthA); } // load localB uint32_t fd2, fh2, fw2; if (is_xcorr) { fd2 = fd; fh2 = fh; fw2 = fw; } else { fd2 = FD-fd-1; fh2 = FH-fh-1; fw2 = FW-fw-1; } if (tidy < BM) { uint32_t fd2d = fd2 * DD, fh2d = fh2 * DH, fw2d = fw2 * DW; uint32_t tmp = offsetB+ic*ID*IH*IW+fd2d*IH*IW+fh2d*IW+fw2d, ok = bool_as_mask(tidy+i < heightB), p0 = bool_as_mask( fd2d+od0 >= PD && fd2d+od0 < ID+PD && fh2d+oh0 >= PH && fh2d+oh0 < IH+PH && fw2d+ow0 >= PW && fw2d+ow0 < IW+PW), p1 = bool_as_mask( fd2d+od1 >= PD && fd2d+od1 < ID+PD && fh2d+oh1 >= PH && fh2d+oh1 < IH+PH && fw2d+ow1 >= PW && fw2d+ow1 < IW+PW), p2 = bool_as_mask( fd2d+od2 >= PD && fd2d+od2 < ID+PD && fh2d+oh2 >= PH && fh2d+oh2 < IH+PH && fw2d+ow2 >= PW && fw2d+ow2 < IW+PW), p3 = bool_as_mask( fd2d+od3 >= PD && fd2d+od3 < ID+PD && fh2d+oh3 >= PH && fh2d+oh3 < IH+PH && fw2d+ow3 >= PW && fw2d+ow3 < IW+PW); localB[tidy][tidx].x = visit_with_mask(src, tmp+op0, ok & p0); localB[tidy][tidx].y = visit_with_mask(src, tmp+op1, ok & p1); localB[tidy][tidx].z = visit_with_mask(src, tmp+op2, ok & p2); localB[tidy][tidx].w = visit_with_mask(src, tmp+op3, ok & p3); } __syncthreads(); // die without this sync().. 
for (uint32_t j = 0u; j < BM; ++j) { float4 tmpA = localA[tidy][j]; float4 tmpB = localB[j][tidx]; sum0.x += tmpA.x * tmpB.x; sum0.y += tmpA.x * tmpB.y; sum0.z += tmpA.x * tmpB.z; sum0.w += tmpA.x * tmpB.w; sum1.x += tmpA.y * tmpB.x; sum1.y += tmpA.y * tmpB.y; sum1.z += tmpA.y * tmpB.z; sum1.w += tmpA.y * tmpB.w; sum2.x += tmpA.z * tmpB.x; sum2.y += tmpA.z * tmpB.y; sum2.z += tmpA.z * tmpB.z; sum2.w += tmpA.z * tmpB.w; sum3.x += tmpA.w * tmpB.x; sum3.y += tmpA.w * tmpB.y; sum3.z += tmpA.w * tmpB.z; sum3.w += tmpA.w * tmpB.w; } fd += fds; fw += fws; fh += fhs; fh += (fw >= FW); fw -= (fw >= FW) * FW; fd += (fh >= FH); fh -= (fh >= FH) * FH; fd -= (fd >= FD) * FD; ic += ics; icm += icms; ic += (icm >= FP); icm -= (icm >= FP) * FP; __syncthreads(); } const uint32_t dst_idx = n*OUT_BS + posy2*widthB + posx2; bool y0 = (posy2+0 < heightA); bool y1 = (posy2+1 < heightA); bool y2 = (posy2+2 < heightA); bool y3 = (posy2+3 < heightA); bool x0 = (posx2+0 < widthB); bool x1 = (posx2+1 < widthB); bool x2 = (posx2+2 < widthB); bool x3 = (posx2+3 < widthB); if (y0) { if (x0) dst[dst_idx + 0*widthB + 0] = sum0.x; if (x1) dst[dst_idx + 0*widthB + 1] = sum0.y; if (x2) dst[dst_idx + 0*widthB + 2] = sum0.z; if (x3) dst[dst_idx + 0*widthB + 3] = sum0.w; } if (y1) { if (x0) dst[dst_idx + 1*widthB + 0] = sum1.x; if (x1) dst[dst_idx + 1*widthB + 1] = sum1.y; if (x2) dst[dst_idx + 1*widthB + 2] = sum1.z; if (x3) dst[dst_idx + 1*widthB + 3] = sum1.w; } if (y2) { if (x0) dst[dst_idx + 2*widthB + 0] = sum2.x; if (x1) dst[dst_idx + 2*widthB + 1] = sum2.y; if (x2) dst[dst_idx + 2*widthB + 2] = sum2.z; if (x3) dst[dst_idx + 2*widthB + 3] = sum2.w; } if (y3) { if (x0) dst[dst_idx + 3*widthB + 0] = sum3.x; if (x1) dst[dst_idx + 3*widthB + 1] = sum3.y; if (x2) dst[dst_idx + 3*widthB + 2] = sum3.z; if (x3) dst[dst_idx + 3*widthB + 3] = sum3.w; } } } // anonymous namespace void convolution3d::exec_inplace_matmul_fwd( const float *src, const float *filter, float *dst, size_t N, size_t INP_BS, size_t 
OUT_BS, size_t IC, size_t ID, size_t IH, size_t IW, size_t OC, size_t OD, size_t OH, size_t OW, size_t FD, size_t FH, size_t FW, size_t PD, size_t PH, size_t PW, size_t SD, size_t SH, size_t SW, size_t DD, size_t DH, size_t DW, bool is_xcorr, hipStream_t stream) { BufferFetcherTextureHost src_tex(const_cast<float *>(src), N * INP_BS), filter_tex(const_cast<float *>(filter), OC*IC*FD*FH*FW); BufferFetcherRaw src_buf, filter_buf; src_buf.ptr = src; filter_buf.ptr = filter; if (!src_tex.init_succ || !filter_tex.init_succ) { src_tex.reset(); filter_tex.reset(); } int m = OC; int n = OD*OH*OW; int BY = 1; int BX = 1; if (m <= 64) { while (BY < 16 && (BY<<2) < m) BY <<= 1; BX = 256 / BY; } else if (n <= 64) { while (BX < 16 && (BX<<2) < n) BX <<= 1; BY = 256 / BX; } else { BX = BY = 16; } dim3 blocks(DIVUP(OD*OH*OW, 4*BX), DIVUP(OC, 4*BY), N); dim3 threads(BX, BY); #define DISPATCH_BX_BY(BX, BY) do { \ if (src_tex.init_succ) { \ KernelPtr<BufferFetcherTexture>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherTexture>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherTexture>; \ } \ hipLaunchKernelGGL(( kptr), dim3(blocks), dim3(threads), 0, stream, \ src_tex.val, filter_tex.val, dst, \ INP_BS, OUT_BS, \ IC, ID, IH, IW, \ OC, OD, OH, OW, \ FD, FH, FW, \ SD, SH, SW, \ PD, PH, PW, \ DD, DH, DW); \ } else { \ KernelPtr<BufferFetcherRaw>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherRaw>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherRaw>; \ } \ hipLaunchKernelGGL(( kptr), dim3(blocks), dim3(threads), 0, stream, \ src_buf, filter_buf, dst, \ INP_BS, OUT_BS, \ IC, ID, IH, IW, \ OC, OD, OH, OW, \ FD, FH, FW, \ SD, SH, SW, \ PD, PH, PW, \ DD, DH, DW); \ } \ } while (0) #define DISPATCH_BX(BX) do { \ DISPATCH_BX_BY(BX, 256/BX); \ } while (0) #define DISPATCH() do { \ switch (BX) { \ case 1: DISPATCH_BX(1); break; \ case 2: DISPATCH_BX(2); break; \ case 4: DISPATCH_BX(4); break; \ case 8: 
DISPATCH_BX(8); break; \ case 16: DISPATCH_BX(16); break; \ case 32: DISPATCH_BX(32); break; \ case 64: DISPATCH_BX(64); break; \ case 128: DISPATCH_BX(128); break; \ case 256: DISPATCH_BX(256); break; \ default: \ report_error("no usable kernel"); \ } \ } while (0) DISPATCH(); #undef DISPATCH #undef DISPATCH_BX #undef DISPATCH_BX_BY after_kernel_launch(); } // vim: syntax=cpp.doxygen
f4ad7d9dbcbf689dc01343a4376d0d58bfcb77de.cu
/** * \file dnn/src/cuda/convolution3d/forward/inplace_matmul_impl.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./inplace_matmul_impl.cuh" #include "src/cuda/utils.cuh" #include <iostream> #include <stdio.h> using namespace megdnn; using namespace cuda; namespace { struct BufferFetcherTexture { cudaTextureObject_t tex; __device__ __forceinline__ float get(uint32_t offset) { return tex1Dfetch<float>(tex, offset); } }; struct BufferFetcherRaw { const float *ptr; __device__ __forceinline__ float get(uint32_t offset) { return ptr[offset]; } }; struct BufferFetcherTextureHost { bool init_succ; BufferFetcherTexture val; BufferFetcherTextureHost(float *p, const size_t n); ~BufferFetcherTextureHost() { reset(); } void reset() { if (init_succ) { cuda_check(cudaDestroyTextureObject(val.tex)); init_succ = false; } } }; BufferFetcherTextureHost::BufferFetcherTextureHost(float *p, const size_t n) { init_succ = false; cudaTextureObject_t tex_obj; cudaResourceDesc res_desc; memset(&res_desc, 0, sizeof(cudaResourceDesc)); res_desc.resType = cudaResourceTypeLinear; res_desc.res.linear.devPtr = static_cast<void *>(p); res_desc.res.linear.sizeInBytes = n*sizeof(float); res_desc.res.linear.desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaTextureDesc tex_desc; memset(&tex_desc, 0, sizeof(cudaTextureDesc)); if (cudaCreateTextureObject(&tex_obj, &res_desc, &tex_desc, NULL) == cudaSuccess) { val.tex = tex_obj; init_succ = true; } else { cudaGetLastError(); // reset error } } template<class BufferFetcher> struct KernelPtr { typedef void(*type)(BufferFetcher, BufferFetcher, float*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, 
uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t); }; //! 1 -> 0xffffffff, 0 -> 0x00000000 __device__ __forceinline__ uint32_t bool_as_mask(uint32_t cond) { return (!cond) - 1u; } union FloatAndU32 { float f; uint32_t u; }; //! \p mask must be either all 1 or 0 bits template<class BufferFetcher> __device__ __forceinline__ float visit_with_mask( BufferFetcher buf, uint32_t offset, uint32_t mask) { FloatAndU32 f; f.f = buf.get(offset & mask); f.u &= mask; return f.f; } template <uint32_t BY, uint32_t BX, bool is_xcorr, class BufferFetcher> __global__ void conv_kernel(BufferFetcher src, BufferFetcher filter, float *dst, const uint32_t INP_BS, const uint32_t OUT_BS, const uint32_t IC, const uint32_t ID, const uint32_t IH, const uint32_t IW, const uint32_t OC, const uint32_t OD, const uint32_t OH, const uint32_t OW, const uint32_t FD, const uint32_t FH, const uint32_t FW, const uint32_t SD, const uint32_t SH, const uint32_t SW, const uint32_t PD, const uint32_t PH, const uint32_t PW, const uint32_t DD, const uint32_t DH, const uint32_t DW) { const uint32_t BM = BY < BX ? 
BY : BX; // BY*BX == 256 // (OC) * (IC*FD*FH*FW) * (OD*OH*OW) const uint32_t n = blockIdx.z; const uint32_t tidx = threadIdx.x; const uint32_t tidy = threadIdx.y; const uint32_t posx = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t posy = blockIdx.y * blockDim.y + threadIdx.y; const uint32_t posx2 = posx<<2; const uint32_t posy2 = posy<<2; const uint32_t heightA = OC; const uint32_t widthA = IC*FD*FH*FW; const uint32_t heightB = widthA; const uint32_t widthB = OD*OH*OW; const uint32_t od0 = (posx2+0) / OW / OH * SD; const uint32_t oh0 = (posx2+0) / OW % OH * SH; const uint32_t ow0 = (posx2+0) % OW * SW; const uint32_t op0 = od0 * IH * IW + oh0 * IW + ow0; const uint32_t od1 = (posx2+1) / OW / OH * SD; const uint32_t oh1 = (posx2+1) / OW % OH * SH; const uint32_t ow1 = (posx2+1) % OW * SW; const uint32_t op1 = od1 * IH * IW + oh1 * IW + ow1; const uint32_t od2 = (posx2+2) / OW / OH * SD; const uint32_t oh2 = (posx2+2) / OW % OH * SH; const uint32_t ow2 = (posx2+2) % OW * SW; const uint32_t op2 = od2 * IH * IW + oh2 * IW + ow2; const uint32_t od3 = (posx2+3) / OW / OH * SD; const uint32_t oh3 = (posx2+3) / OW % OH * SH; const uint32_t ow3 = (posx2+3) % OW * SW; const uint32_t op3 = od3 * IH * IW + oh3 * IW + ow3; const uint32_t FP = FD*FH*FW; // OC % (BLOCK*4) == 0 // IC*FD*FH*FW % BLOCK == 0 // OD*OH*OW % (BLOCK*4) == 0 __shared__ float4 localA[BY][BM]; __shared__ float4 localB[BM][BX]; uint32_t i = 0u; uint32_t offsetA = posy2 * widthA + tidx; uint32_t offsetB = n*INP_BS - PD*IH*IW - PH*IW - PW; float4 sum0 = {0.0f, 0.0f, 0.0f, 0.0f}, sum1 = {0.0f, 0.0f, 0.0f, 0.0f}, sum2 = {0.0f, 0.0f, 0.0f, 0.0f}, sum3 = {0.0f, 0.0f, 0.0f, 0.0f}; uint32_t fd = tidy / FW / FH % FD; uint32_t fh = tidy / FW % FH; uint32_t fw = tidy % FW; uint32_t ic = tidy / (FD*FH*FW); uint32_t icm = tidy % (FD*FH*FW); const uint32_t fds = BM / FW / FH % FD; const uint32_t fhs = BM / FW % FH; const uint32_t fws = BM % FW; const uint32_t ics = BM / (FD*FH*FW); const uint32_t icms = BM % 
(FD*FH*FW); for (; i < widthA; i += BM, offsetA += BM) { // load localA if (tidx < BM) { localA[tidy][tidx].x = filter.get(offsetA + 0*widthA); localA[tidy][tidx].y = filter.get(offsetA + 1*widthA); localA[tidy][tidx].z = filter.get(offsetA + 2*widthA); localA[tidy][tidx].w = filter.get(offsetA + 3*widthA); } // load localB uint32_t fd2, fh2, fw2; if (is_xcorr) { fd2 = fd; fh2 = fh; fw2 = fw; } else { fd2 = FD-fd-1; fh2 = FH-fh-1; fw2 = FW-fw-1; } if (tidy < BM) { uint32_t fd2d = fd2 * DD, fh2d = fh2 * DH, fw2d = fw2 * DW; uint32_t tmp = offsetB+ic*ID*IH*IW+fd2d*IH*IW+fh2d*IW+fw2d, ok = bool_as_mask(tidy+i < heightB), p0 = bool_as_mask( fd2d+od0 >= PD && fd2d+od0 < ID+PD && fh2d+oh0 >= PH && fh2d+oh0 < IH+PH && fw2d+ow0 >= PW && fw2d+ow0 < IW+PW), p1 = bool_as_mask( fd2d+od1 >= PD && fd2d+od1 < ID+PD && fh2d+oh1 >= PH && fh2d+oh1 < IH+PH && fw2d+ow1 >= PW && fw2d+ow1 < IW+PW), p2 = bool_as_mask( fd2d+od2 >= PD && fd2d+od2 < ID+PD && fh2d+oh2 >= PH && fh2d+oh2 < IH+PH && fw2d+ow2 >= PW && fw2d+ow2 < IW+PW), p3 = bool_as_mask( fd2d+od3 >= PD && fd2d+od3 < ID+PD && fh2d+oh3 >= PH && fh2d+oh3 < IH+PH && fw2d+ow3 >= PW && fw2d+ow3 < IW+PW); localB[tidy][tidx].x = visit_with_mask(src, tmp+op0, ok & p0); localB[tidy][tidx].y = visit_with_mask(src, tmp+op1, ok & p1); localB[tidy][tidx].z = visit_with_mask(src, tmp+op2, ok & p2); localB[tidy][tidx].w = visit_with_mask(src, tmp+op3, ok & p3); } __syncthreads(); // die without this sync().. 
for (uint32_t j = 0u; j < BM; ++j) { float4 tmpA = localA[tidy][j]; float4 tmpB = localB[j][tidx]; sum0.x += tmpA.x * tmpB.x; sum0.y += tmpA.x * tmpB.y; sum0.z += tmpA.x * tmpB.z; sum0.w += tmpA.x * tmpB.w; sum1.x += tmpA.y * tmpB.x; sum1.y += tmpA.y * tmpB.y; sum1.z += tmpA.y * tmpB.z; sum1.w += tmpA.y * tmpB.w; sum2.x += tmpA.z * tmpB.x; sum2.y += tmpA.z * tmpB.y; sum2.z += tmpA.z * tmpB.z; sum2.w += tmpA.z * tmpB.w; sum3.x += tmpA.w * tmpB.x; sum3.y += tmpA.w * tmpB.y; sum3.z += tmpA.w * tmpB.z; sum3.w += tmpA.w * tmpB.w; } fd += fds; fw += fws; fh += fhs; fh += (fw >= FW); fw -= (fw >= FW) * FW; fd += (fh >= FH); fh -= (fh >= FH) * FH; fd -= (fd >= FD) * FD; ic += ics; icm += icms; ic += (icm >= FP); icm -= (icm >= FP) * FP; __syncthreads(); } const uint32_t dst_idx = n*OUT_BS + posy2*widthB + posx2; bool y0 = (posy2+0 < heightA); bool y1 = (posy2+1 < heightA); bool y2 = (posy2+2 < heightA); bool y3 = (posy2+3 < heightA); bool x0 = (posx2+0 < widthB); bool x1 = (posx2+1 < widthB); bool x2 = (posx2+2 < widthB); bool x3 = (posx2+3 < widthB); if (y0) { if (x0) dst[dst_idx + 0*widthB + 0] = sum0.x; if (x1) dst[dst_idx + 0*widthB + 1] = sum0.y; if (x2) dst[dst_idx + 0*widthB + 2] = sum0.z; if (x3) dst[dst_idx + 0*widthB + 3] = sum0.w; } if (y1) { if (x0) dst[dst_idx + 1*widthB + 0] = sum1.x; if (x1) dst[dst_idx + 1*widthB + 1] = sum1.y; if (x2) dst[dst_idx + 1*widthB + 2] = sum1.z; if (x3) dst[dst_idx + 1*widthB + 3] = sum1.w; } if (y2) { if (x0) dst[dst_idx + 2*widthB + 0] = sum2.x; if (x1) dst[dst_idx + 2*widthB + 1] = sum2.y; if (x2) dst[dst_idx + 2*widthB + 2] = sum2.z; if (x3) dst[dst_idx + 2*widthB + 3] = sum2.w; } if (y3) { if (x0) dst[dst_idx + 3*widthB + 0] = sum3.x; if (x1) dst[dst_idx + 3*widthB + 1] = sum3.y; if (x2) dst[dst_idx + 3*widthB + 2] = sum3.z; if (x3) dst[dst_idx + 3*widthB + 3] = sum3.w; } } } // anonymous namespace void convolution3d::exec_inplace_matmul_fwd( const float *src, const float *filter, float *dst, size_t N, size_t INP_BS, size_t 
OUT_BS, size_t IC, size_t ID, size_t IH, size_t IW, size_t OC, size_t OD, size_t OH, size_t OW, size_t FD, size_t FH, size_t FW, size_t PD, size_t PH, size_t PW, size_t SD, size_t SH, size_t SW, size_t DD, size_t DH, size_t DW, bool is_xcorr, cudaStream_t stream) { BufferFetcherTextureHost src_tex(const_cast<float *>(src), N * INP_BS), filter_tex(const_cast<float *>(filter), OC*IC*FD*FH*FW); BufferFetcherRaw src_buf, filter_buf; src_buf.ptr = src; filter_buf.ptr = filter; if (!src_tex.init_succ || !filter_tex.init_succ) { src_tex.reset(); filter_tex.reset(); } int m = OC; int n = OD*OH*OW; int BY = 1; int BX = 1; if (m <= 64) { while (BY < 16 && (BY<<2) < m) BY <<= 1; BX = 256 / BY; } else if (n <= 64) { while (BX < 16 && (BX<<2) < n) BX <<= 1; BY = 256 / BX; } else { BX = BY = 16; } dim3 blocks(DIVUP(OD*OH*OW, 4*BX), DIVUP(OC, 4*BY), N); dim3 threads(BX, BY); #define DISPATCH_BX_BY(BX, BY) do { \ if (src_tex.init_succ) { \ KernelPtr<BufferFetcherTexture>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherTexture>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherTexture>; \ } \ kptr<<<blocks, threads, 0, stream>>>( \ src_tex.val, filter_tex.val, dst, \ INP_BS, OUT_BS, \ IC, ID, IH, IW, \ OC, OD, OH, OW, \ FD, FH, FW, \ SD, SH, SW, \ PD, PH, PW, \ DD, DH, DW); \ } else { \ KernelPtr<BufferFetcherRaw>::type kptr; \ if (is_xcorr) { \ kptr = conv_kernel<BY, BX, true, BufferFetcherRaw>; \ } else { \ kptr = conv_kernel<BY, BX, false, BufferFetcherRaw>; \ } \ kptr<<<blocks, threads, 0, stream>>>( \ src_buf, filter_buf, dst, \ INP_BS, OUT_BS, \ IC, ID, IH, IW, \ OC, OD, OH, OW, \ FD, FH, FW, \ SD, SH, SW, \ PD, PH, PW, \ DD, DH, DW); \ } \ } while (0) #define DISPATCH_BX(BX) do { \ DISPATCH_BX_BY(BX, 256/BX); \ } while (0) #define DISPATCH() do { \ switch (BX) { \ case 1: DISPATCH_BX(1); break; \ case 2: DISPATCH_BX(2); break; \ case 4: DISPATCH_BX(4); break; \ case 8: DISPATCH_BX(8); break; \ case 16: DISPATCH_BX(16); break; \ case 
32: DISPATCH_BX(32); break; \ case 64: DISPATCH_BX(64); break; \ case 128: DISPATCH_BX(128); break; \ case 256: DISPATCH_BX(256); break; \ default: \ report_error("no usable kernel"); \ } \ } while (0) DISPATCH(); #undef DISPATCH #undef DISPATCH_BX #undef DISPATCH_BX_BY after_kernel_launch(); } // vim: syntax=cpp.doxygen
8f15b46f354e80d2ad1a9a39cd5219d8525799f4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <memory.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include "cuqueue.cuh" #include "utils.h" #define RAND_FACTOR 1e9+7 #define THREAD (16 * 1024) // get thread index __device__ int getIndex(){ int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; int line = blockDim.x * gridDim.x; return row * line + col; } // find next unused node __device__ LL findVertex(LL* nodeSet, LL totalNodes){ int nextNode = atomicAdd(&nodeSet[0], 1LL); if(nextNode >= totalNodes) return 0; return nextNode; } // judge if node 'nd' is visited once __device__ bool nd_isVisited(bool* vis, LL nd, LL tot, int index){ return vis[index * tot + nd - 1]; } // set node 'nd' visited __device__ void nd_setVisited(bool* vis, LL nd, LL tot, int index){ vis[index * tot + nd - 1] = true; } __device__ void nd_resetVisited(bool* vis, LL tot, int index){ LL i = index * tot, end = index * tot + tot; for(;i < end;i++) vis[i] = false; } // initialize random seeds for each thread __global__ void setupRandGenerator(float* randSeed, hiprandState_t* state){ int index = getIndex(); unsigned long seed = (unsigned long)(randSeed[index] * RAND_FACTOR); hiprand_init(seed, index, 0, &state[index]); } // BFS kernel function in each thread __global__ void bfs(LL totalNodes, LL* adjCount, LL* adjList, LL* nodeSet, LL* queue, bool* closed, hiprandState_t* state, float constProb, bool thread){ int index = getIndex(); LL nodeCount = 0, node = index + 1; int que_h, que_t; LL adjNode, prevNode; LL nodeSum = 0; float randProb; hiprandState_t localState = state[index]; int start, stop; start = clock(); while((node = findVertex(nodeSet, totalNodes)) > 0){ nodeCount = 0; prevNode = node; que_init(que_h, que_t, index); nd_resetVisited(closed, totalNodes, index); if(!que_enque(queue, que_h, que_t, prevNode, index)); // overflow nd_setVisited(closed, 
prevNode, totalNodes, index); while(!que_isEmpty(que_h, que_t, index)){ node = que_deque(queue, que_h, que_t, index); adjNode = adjCount[node - 1]; while(adjNode < adjCount[node]){ if(!nd_isVisited(closed, adjList[adjNode], totalNodes, index)){ randProb = hiprand_uniform(&localState); if(randProb < constProb){ if(!que_enque(queue, que_h, que_t, adjList[adjNode], index)); nd_setVisited(closed, adjList[adjNode], totalNodes, index); nodeCount++; } } adjNode++; } } if(atomicCAS(nodeSet + prevNode, 0, nodeCount) != 0); nodeSum += nodeCount; } stop = clock(); if(thread) printf("%d %lld %f\n", index, nodeSum, 1.f*(stop-start)/CLOCKS_PER_SEC); state[index] = localState; } float CONSTANT_PROBABILITY = 0.05; int main(int argc, char** argv){ // argument parsing char ch, filePath[256]; bool thread = false; while((ch = getopt_long(argc, argv, short_options, long_options, NULL)) != -1){ switch(ch){ case 'f': strncpy(filePath, optarg, 256); break; case 'p': CONSTANT_PROBABILITY = atof(optarg); CONSTANT_PROBABILITY = CONSTANT_PROBABILITY > 1 ? 1. 
: CONSTANT_PROBABILITY; break; case 't': thread = true; break; } } // read graph from file LL totalNodes = 0, totalEdges = 0; LL* h_adjCount = NULL, *h_adjList = NULL; readGraph(filePath, h_adjList, h_adjCount, totalNodes, totalEdges); if(!thread){ printf("========= NEW RUN\n"); printf("This graph contains %lld nodes connected by %lld edges\n", totalNodes, totalEdges); printf("Set constant probability: %.2f\n", CONSTANT_PROBABILITY); printf("Running on %d threads\n", THREAD); } // addresses for GPU memory addresses storage bool* d_closed; LL* d_queue, *d_nodeSet; LL* d_adjList, *d_adjCount; float* d_randSeed; float gpu_runtime; hiprandState_t* d_randState; hipEvent_t start, stop; // define GPU thread layout dim3 gridSize(4,4), blockSize(32,32); // generate random numbers for each thread as random seeds hiprandGenerator_t curandGenerator; hipMalloc((void**)&d_randSeed, sizeof(float) * THREAD); hipMalloc((void**)&d_randState, sizeof(hiprandState_t) * THREAD); hiprandCreateGenerator(&curandGenerator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(curandGenerator, time(NULL)); hiprandGenerateUniform(curandGenerator, d_randSeed, THREAD); hipLaunchKernelGGL(( setupRandGenerator), dim3(gridSize),dim3(blockSize), 0, 0, d_randSeed, d_randState); // cuda memory allocation and initialization hipMalloc((void**)&d_closed, sizeof(bool) * THREAD * totalNodes); hipMalloc((void**)&d_queue, sizeof(LL) * THREAD * QUE_LEN); // compress? 
hipMalloc((void**)&d_nodeSet, sizeof(LL) * (totalNodes + 1)); hipMalloc((void**)&d_adjList, sizeof(LL) * totalEdges); hipMalloc((void**)&d_adjCount, sizeof(LL) * (totalNodes + 1)); // sum of edges before current node hipMemset(d_nodeSet, 0LL, sizeof(LL) * (totalNodes + 1)); hipMemset(d_closed, false, sizeof(bool) * THREAD * totalNodes); hipMemcpy(d_adjList, h_adjList, sizeof(LL) * totalEdges, hipMemcpyHostToDevice); hipMemcpy(d_adjCount, h_adjCount, sizeof(LL) * (totalNodes + 1), hipMemcpyHostToDevice); // elapsed time record hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // launch the kernel hipLaunchKernelGGL(( bfs), dim3(gridSize),dim3(blockSize), 0, 0, totalNodes, d_adjCount, d_adjList, d_nodeSet, d_queue, d_closed, d_randState, CONSTANT_PROBABILITY, thread); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&gpu_runtime, start, stop); // statistics if(!thread){ // LL* h_nodeSet = new LL[totalNodes + 1]; // hipMemcpy(h_nodeSet, // d_nodeSet, // sizeof(LL) * (totalNodes + 1), // hipMemcpyDeviceToHost); // for(LL i = 1;i <= totalNodes;i++) // if(h_nodeSet[i] > 0) // printf("Node %lld influence %lld other nodes\n", i, h_nodeSet[i]); // delete[] h_nodeSet; printf("========= GPU ELAPSED TIME: %f ms\n\n", gpu_runtime); } if(thread) printf("%f\n", gpu_runtime); // cuda memory free hipEventDestroy(start); hipEventDestroy(stop); hipFree(d_randSeed); hipFree(d_randState); hipFree(d_closed); hipFree(d_queue); hipFree(d_nodeSet); hipFree(d_adjList); hipFree(d_adjCount); delete[] h_adjList; delete[] h_adjCount; return 0; }
8f15b46f354e80d2ad1a9a39cd5219d8525799f4.cu
#include <stdlib.h> #include <memory.h> #include <cuda.h> #include <cuda_runtime.h> #include <curand_kernel.h> #include "cuqueue.cuh" #include "utils.h" #define RAND_FACTOR 1e9+7 #define THREAD (16 * 1024) // get thread index __device__ int getIndex(){ int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; int line = blockDim.x * gridDim.x; return row * line + col; } // find next unused node __device__ LL findVertex(LL* nodeSet, LL totalNodes){ int nextNode = atomicAdd(&nodeSet[0], 1LL); if(nextNode >= totalNodes) return 0; return nextNode; } // judge if node 'nd' is visited once __device__ bool nd_isVisited(bool* vis, LL nd, LL tot, int index){ return vis[index * tot + nd - 1]; } // set node 'nd' visited __device__ void nd_setVisited(bool* vis, LL nd, LL tot, int index){ vis[index * tot + nd - 1] = true; } __device__ void nd_resetVisited(bool* vis, LL tot, int index){ LL i = index * tot, end = index * tot + tot; for(;i < end;i++) vis[i] = false; } // initialize random seeds for each thread __global__ void setupRandGenerator(float* randSeed, curandState* state){ int index = getIndex(); unsigned long seed = (unsigned long)(randSeed[index] * RAND_FACTOR); curand_init(seed, index, 0, &state[index]); } // BFS kernel function in each thread __global__ void bfs(LL totalNodes, LL* adjCount, LL* adjList, LL* nodeSet, LL* queue, bool* closed, curandState* state, float constProb, bool thread){ int index = getIndex(); LL nodeCount = 0, node = index + 1; int que_h, que_t; LL adjNode, prevNode; LL nodeSum = 0; float randProb; curandState localState = state[index]; int start, stop; start = clock(); while((node = findVertex(nodeSet, totalNodes)) > 0){ nodeCount = 0; prevNode = node; que_init(que_h, que_t, index); nd_resetVisited(closed, totalNodes, index); if(!que_enque(queue, que_h, que_t, prevNode, index)); // overflow nd_setVisited(closed, prevNode, totalNodes, index); while(!que_isEmpty(que_h, que_t, index)){ node = que_deque(queue, 
que_h, que_t, index); adjNode = adjCount[node - 1]; while(adjNode < adjCount[node]){ if(!nd_isVisited(closed, adjList[adjNode], totalNodes, index)){ randProb = curand_uniform(&localState); if(randProb < constProb){ if(!que_enque(queue, que_h, que_t, adjList[adjNode], index)); nd_setVisited(closed, adjList[adjNode], totalNodes, index); nodeCount++; } } adjNode++; } } if(atomicCAS(nodeSet + prevNode, 0, nodeCount) != 0); nodeSum += nodeCount; } stop = clock(); if(thread) printf("%d %lld %f\n", index, nodeSum, 1.f*(stop-start)/CLOCKS_PER_SEC); state[index] = localState; } float CONSTANT_PROBABILITY = 0.05; int main(int argc, char** argv){ // argument parsing char ch, filePath[256]; bool thread = false; while((ch = getopt_long(argc, argv, short_options, long_options, NULL)) != -1){ switch(ch){ case 'f': strncpy(filePath, optarg, 256); break; case 'p': CONSTANT_PROBABILITY = atof(optarg); CONSTANT_PROBABILITY = CONSTANT_PROBABILITY > 1 ? 1. : CONSTANT_PROBABILITY; break; case 't': thread = true; break; } } // read graph from file LL totalNodes = 0, totalEdges = 0; LL* h_adjCount = NULL, *h_adjList = NULL; readGraph(filePath, h_adjList, h_adjCount, totalNodes, totalEdges); if(!thread){ printf("========= NEW RUN\n"); printf("This graph contains %lld nodes connected by %lld edges\n", totalNodes, totalEdges); printf("Set constant probability: %.2f\n", CONSTANT_PROBABILITY); printf("Running on %d threads\n", THREAD); } // addresses for GPU memory addresses storage bool* d_closed; LL* d_queue, *d_nodeSet; LL* d_adjList, *d_adjCount; float* d_randSeed; float gpu_runtime; curandState* d_randState; cudaEvent_t start, stop; // define GPU thread layout dim3 gridSize(4,4), blockSize(32,32); // generate random numbers for each thread as random seeds curandGenerator_t curandGenerator; cudaMalloc((void**)&d_randSeed, sizeof(float) * THREAD); cudaMalloc((void**)&d_randState, sizeof(curandState) * THREAD); curandCreateGenerator(&curandGenerator, CURAND_RNG_PSEUDO_DEFAULT); 
curandSetPseudoRandomGeneratorSeed(curandGenerator, time(NULL)); curandGenerateUniform(curandGenerator, d_randSeed, THREAD); setupRandGenerator<<<gridSize,blockSize>>>(d_randSeed, d_randState); // cuda memory allocation and initialization cudaMalloc((void**)&d_closed, sizeof(bool) * THREAD * totalNodes); cudaMalloc((void**)&d_queue, sizeof(LL) * THREAD * QUE_LEN); // compress? cudaMalloc((void**)&d_nodeSet, sizeof(LL) * (totalNodes + 1)); cudaMalloc((void**)&d_adjList, sizeof(LL) * totalEdges); cudaMalloc((void**)&d_adjCount, sizeof(LL) * (totalNodes + 1)); // sum of edges before current node cudaMemset(d_nodeSet, 0LL, sizeof(LL) * (totalNodes + 1)); cudaMemset(d_closed, false, sizeof(bool) * THREAD * totalNodes); cudaMemcpy(d_adjList, h_adjList, sizeof(LL) * totalEdges, cudaMemcpyHostToDevice); cudaMemcpy(d_adjCount, h_adjCount, sizeof(LL) * (totalNodes + 1), cudaMemcpyHostToDevice); // elapsed time record cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // launch the kernel bfs<<<gridSize,blockSize>>>(totalNodes, d_adjCount, d_adjList, d_nodeSet, d_queue, d_closed, d_randState, CONSTANT_PROBABILITY, thread); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpu_runtime, start, stop); // statistics if(!thread){ // LL* h_nodeSet = new LL[totalNodes + 1]; // cudaMemcpy(h_nodeSet, // d_nodeSet, // sizeof(LL) * (totalNodes + 1), // cudaMemcpyDeviceToHost); // for(LL i = 1;i <= totalNodes;i++) // if(h_nodeSet[i] > 0) // printf("Node %lld influence %lld other nodes\n", i, h_nodeSet[i]); // delete[] h_nodeSet; printf("========= GPU ELAPSED TIME: %f ms\n\n", gpu_runtime); } if(thread) printf("%f\n", gpu_runtime); // cuda memory free cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_randSeed); cudaFree(d_randState); cudaFree(d_closed); cudaFree(d_queue); cudaFree(d_nodeSet); cudaFree(d_adjList); cudaFree(d_adjCount); delete[] h_adjList; delete[] h_adjCount; return 0; }
db670c1a7dcf564a1120a90bbb09b06ceb5614f4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include "profiler.h" #define KiB 1024 #define MiB (KiB*KiB) #define GiB (KiB*KiB*KiB) #define GHz (1000L*1000L*1000L) #define NOUTER_ITERS 1L #define NINNER_ITERS 50L #define CACHE_LINE_LENGTH 128L #define STRIDE_START 5L #define STRIDE_END 5L #define ALLOCATION_START (512L) #define ALLOCATION_END (512L*MiB) #define MEM_LD_LATENCY //#define INST_LATENCY __global__ void lat(const size_t ncache_lines, char* P, char* dummy, long long int* cycles) { const size_t gid = blockDim.x*blockIdx.x+threadIdx.x; if(gid > 0) { return; } #if defined(MEM_LD_LATENCY) char** p0 = (char**)P; // Warmup for(size_t n = 0; n < ncache_lines; ++n) { p0 = (char**)*p0; } long long int t0 = clock64(); char** p1 = (char**)P; #pragma unroll 64 for(size_t n = 0; n < ncache_lines*NINNER_ITERS; ++n) { p1 = (char**)*p1; } *dummy = *(char*)p0 + *(char*)p1; #elif defined(INST_LATENCY) long long int t0 = clock64(); float a = 0.9999f; for(int n = 0; n < NINNER_ITERS; ++n) { #if 0 a += 0.99991f; a *= 0.9991f; a += a * 0.999991f; a = sqrtf(a); a /= 0.999991f; #endif // if 0 } #if 0 printf("%.7f\n", a); #endif // if 0 *dummy = (char)a; #endif *cycles += clock64()-t0; } __global__ void make_ring(const size_t ncache_lines, const size_t as, const size_t st, char* P) { const size_t gid = blockDim.x*blockIdx.x+threadIdx.x; if(gid > 0) { return; } // Create a ring of pointers at the cache line granularity for(size_t i = 0; i < ncache_lines; ++i) { *(char**)&P[(i*CACHE_LINE_LENGTH)] = &P[((i+st)*CACHE_LINE_LENGTH)%as]; } } int main() { struct Profile profile; struct ProfileEntry* pe = &profile.profiler_entries[0]; pe->time = 0.0; // Initialise char* P; char* dummy; hipMalloc((void**)&P, ALLOCATION_END); hipMalloc((void**)&dummy, 1); printf("Allocating %lu MiB\n", ALLOCATION_END/MiB); // Open files FILE* nfp = fopen("/dev/null", "a"); FILE* fp = fopen("lat.csv", "a"); 
long long int* d_cycles; long long int* d_cycles_dummy; hipMalloc(&d_cycles, sizeof(long long int)); hipMalloc(&d_cycles_dummy, sizeof(long long int)); for(size_t st = STRIDE_START; st <= STRIDE_END; ++st) { for(size_t as = ALLOCATION_START; as <= ALLOCATION_END; as *= 2L) { const size_t ncache_lines = as/CACHE_LINE_LENGTH; #if defined(MEM_LD_LATENCY) hipLaunchKernelGGL(( make_ring), dim3(1),dim3(1), 0, 0, ncache_lines, as, st, P); #endif // Zero the cycles long long int h_cycles = 0; hipMemcpy(d_cycles, &h_cycles, sizeof(long long int), hipMemcpyHostToDevice); // Perform the test START_PROFILING(&profile); for(size_t i = 0; i < NOUTER_ITERS; ++i) { hipLaunchKernelGGL(( lat), dim3(1),dim3(1), 0, 0, ncache_lines, P, dummy, d_cycles); } hipDeviceSynchronize(); STOP_PROFILING(&profile, "p"); // Bring the cycle count back from the device hipMemcpy(&h_cycles, d_cycles, sizeof(long long int), hipMemcpyDeviceToHost); printf("Elapsed Clock Cycles %lu\n", h_cycles); #if defined(MEM_LD_LATENCY) double loads = (double)NOUTER_ITERS*ncache_lines*NINNER_ITERS; double cycles_load = ((double)h_cycles/loads); printf("Array Size %.3fMB Stride %d Cache Lines %d Time %.12fs\n", (double)as/MiB, st, ncache_lines, pe->time); double loads_s = loads / pe->time; double cycles_s = 1.48*GHz; double cycles_load2 = (double)(cycles_s / loads_s); printf("Loads = %lu\n", loads); printf("Cycles / Load = %.4f\n", cycles_load); //printf("backup = %.4f\n", cycles_load2); fprintf(fp, "%d,%lu,%.4f\n", st, as, cycles_load); #elif defined(INST_LATENCY) size_t ops = NOUTER_ITERS*NINNER_ITERS; printf("Ops %lu\n", ops); printf("Cycles / Op %.4f\n", h_cycles/(double)ops); #endif h_cycles = 0; hipMemcpy(d_cycles, &h_cycles, sizeof(long long int), hipMemcpyHostToDevice); pe->time = 0.0; } } fclose(nfp); fclose(fp); hipFree(P); return 0; }
db670c1a7dcf564a1120a90bbb09b06ceb5614f4.cu
#include <stdint.h> #include <stdio.h> #include <stdlib.h> #include "profiler.h" #define KiB 1024 #define MiB (KiB*KiB) #define GiB (KiB*KiB*KiB) #define GHz (1000L*1000L*1000L) #define NOUTER_ITERS 1L #define NINNER_ITERS 50L #define CACHE_LINE_LENGTH 128L #define STRIDE_START 5L #define STRIDE_END 5L #define ALLOCATION_START (512L) #define ALLOCATION_END (512L*MiB) #define MEM_LD_LATENCY //#define INST_LATENCY __global__ void lat(const size_t ncache_lines, char* P, char* dummy, long long int* cycles) { const size_t gid = blockDim.x*blockIdx.x+threadIdx.x; if(gid > 0) { return; } #if defined(MEM_LD_LATENCY) char** p0 = (char**)P; // Warmup for(size_t n = 0; n < ncache_lines; ++n) { p0 = (char**)*p0; } long long int t0 = clock64(); char** p1 = (char**)P; #pragma unroll 64 for(size_t n = 0; n < ncache_lines*NINNER_ITERS; ++n) { p1 = (char**)*p1; } *dummy = *(char*)p0 + *(char*)p1; #elif defined(INST_LATENCY) long long int t0 = clock64(); float a = 0.9999f; for(int n = 0; n < NINNER_ITERS; ++n) { #if 0 a += 0.99991f; a *= 0.9991f; a += a * 0.999991f; a = sqrtf(a); a /= 0.999991f; #endif // if 0 } #if 0 printf("%.7f\n", a); #endif // if 0 *dummy = (char)a; #endif *cycles += clock64()-t0; } __global__ void make_ring(const size_t ncache_lines, const size_t as, const size_t st, char* P) { const size_t gid = blockDim.x*blockIdx.x+threadIdx.x; if(gid > 0) { return; } // Create a ring of pointers at the cache line granularity for(size_t i = 0; i < ncache_lines; ++i) { *(char**)&P[(i*CACHE_LINE_LENGTH)] = &P[((i+st)*CACHE_LINE_LENGTH)%as]; } } int main() { struct Profile profile; struct ProfileEntry* pe = &profile.profiler_entries[0]; pe->time = 0.0; // Initialise char* P; char* dummy; cudaMalloc((void**)&P, ALLOCATION_END); cudaMalloc((void**)&dummy, 1); printf("Allocating %lu MiB\n", ALLOCATION_END/MiB); // Open files FILE* nfp = fopen("/dev/null", "a"); FILE* fp = fopen("lat.csv", "a"); long long int* d_cycles; long long int* d_cycles_dummy; cudaMalloc(&d_cycles, 
sizeof(long long int)); cudaMalloc(&d_cycles_dummy, sizeof(long long int)); for(size_t st = STRIDE_START; st <= STRIDE_END; ++st) { for(size_t as = ALLOCATION_START; as <= ALLOCATION_END; as *= 2L) { const size_t ncache_lines = as/CACHE_LINE_LENGTH; #if defined(MEM_LD_LATENCY) make_ring<<<1,1>>>(ncache_lines, as, st, P); #endif // Zero the cycles long long int h_cycles = 0; cudaMemcpy(d_cycles, &h_cycles, sizeof(long long int), cudaMemcpyHostToDevice); // Perform the test START_PROFILING(&profile); for(size_t i = 0; i < NOUTER_ITERS; ++i) { lat<<<1,1>>>(ncache_lines, P, dummy, d_cycles); } cudaDeviceSynchronize(); STOP_PROFILING(&profile, "p"); // Bring the cycle count back from the device cudaMemcpy(&h_cycles, d_cycles, sizeof(long long int), cudaMemcpyDeviceToHost); printf("Elapsed Clock Cycles %lu\n", h_cycles); #if defined(MEM_LD_LATENCY) double loads = (double)NOUTER_ITERS*ncache_lines*NINNER_ITERS; double cycles_load = ((double)h_cycles/loads); printf("Array Size %.3fMB Stride %d Cache Lines %d Time %.12fs\n", (double)as/MiB, st, ncache_lines, pe->time); double loads_s = loads / pe->time; double cycles_s = 1.48*GHz; double cycles_load2 = (double)(cycles_s / loads_s); printf("Loads = %lu\n", loads); printf("Cycles / Load = %.4f\n", cycles_load); //printf("backup = %.4f\n", cycles_load2); fprintf(fp, "%d,%lu,%.4f\n", st, as, cycles_load); #elif defined(INST_LATENCY) size_t ops = NOUTER_ITERS*NINNER_ITERS; printf("Ops %lu\n", ops); printf("Cycles / Op %.4f\n", h_cycles/(double)ops); #endif h_cycles = 0; cudaMemcpy(d_cycles, &h_cycles, sizeof(long long int), cudaMemcpyHostToDevice); pe->time = 0.0; } } fclose(nfp); fclose(fp); cudaFree(P); return 0; }
e4ce5d7b21b502752b3b984f240212162140b1ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_fp16.h" #include "src/cuda/conv_bias/chanwise/kern.cuh" #include "src/cuda/conv_bias/chanwise/kern_helper.cuh" #include "src/cuda/conv_bias/chanwise/launch_config.cuh" #include "src/cuda/fp16_help.cuh" using namespace megdnn; using namespace cuda; using namespace conv_bias; using namespace chanwise; #include "src/cuda/conv_bias/chanwise/depthwise_large_filter_algo.cuh" namespace megdnn { namespace cuda { namespace conv_bias { namespace chanwise { // =====================================fwd===================================== #define check template <> void run_fwd_depthwise_large_filter( float* dst, const float* src, const float* flt, const Param& param, hipStream_t stream) { INSTANCE(float, float2, DepthwiseConv2dDirection::DIRECTION_FORWARD) } #if TORCH_HIP_VERSION >= 9000 template <> void run_fwd_depthwise_large_filter( __half* dst, const __half* src, const __half* flt, const Param& param, hipStream_t stream) { INSTANCE(__half, __half2, DepthwiseConv2dDirection::DIRECTION_FORWARD) } #endif } // namespace chanwise } // namespace conv_bias } // namespace cuda } // namespace megdnn // vim: syntax=cuda.doxygen
e4ce5d7b21b502752b3b984f240212162140b1ab.cu
#include "cuda.h" #include "cuda_fp16.h" #include "src/cuda/conv_bias/chanwise/kern.cuh" #include "src/cuda/conv_bias/chanwise/kern_helper.cuh" #include "src/cuda/conv_bias/chanwise/launch_config.cuh" #include "src/cuda/fp16_help.cuh" using namespace megdnn; using namespace cuda; using namespace conv_bias; using namespace chanwise; #include "src/cuda/conv_bias/chanwise/depthwise_large_filter_algo.cuh" namespace megdnn { namespace cuda { namespace conv_bias { namespace chanwise { // =====================================fwd===================================== #define check template <> void run_fwd_depthwise_large_filter( float* dst, const float* src, const float* flt, const Param& param, cudaStream_t stream) { INSTANCE(float, float2, DepthwiseConv2dDirection::DIRECTION_FORWARD) } #if CUDA_VERSION >= 9000 template <> void run_fwd_depthwise_large_filter( __half* dst, const __half* src, const __half* flt, const Param& param, cudaStream_t stream) { INSTANCE(__half, __half2, DepthwiseConv2dDirection::DIRECTION_FORWARD) } #endif } // namespace chanwise } // namespace conv_bias } // namespace cuda } // namespace megdnn // vim: syntax=cuda.doxygen
acf990a58cadee3f7be528111f33f7fb2be69275.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <omp.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #ifdef USE_MPI #include <mpi.h> #endif #include "../utils/common.h" static size_t N = 1000; static size_t iter1 = 200; static size_t iter2 = 400; void init(int *p, size_t size) { for (size_t i = 0; i < size; ++i) { p[i] = i; } } void output(int *p, size_t size) { for (size_t i = 0; i < size; ++i) { printf("index %zu: %d\n", i, p[i]); } } int main(int argc, char *argv[]) { #ifdef USE_MPI int numtasks, rank; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numtasks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); printf("MPI task %d/%d\n", rank, numtasks); #endif // Init device hipDevice_t device; hipCtx_t context; hipModule_t module; hipFunction_t function; int device_id = 0; if (argc > 1) { device_id = atoi(argv[1]); } cu_init_device(device_id, device, context); cu_load_module_function(module, "vecAdd.cubin", function, "vecAdd"); // Iter bias if (argc > 2) { iter1 = atoi(argv[2]); } if (argc > 3) { iter2 = atoi(argv[3]); } #pragma omp parallel { int l[N], r[N], p[N]; hipDeviceptr_t dl, dr, dp; init(l, N); init(r, N); size_t threads = 256; size_t blocks = (N - 1) / threads + 1; DRIVER_API_CALL(hipCtxSetCurrent(context)); DRIVER_API_CALL(cuMemAlloc(&dl, N * sizeof(int))); DRIVER_API_CALL(cuMemAlloc(&dr, N * sizeof(int))); DRIVER_API_CALL(cuMemAlloc(&dp, N * sizeof(int))); DRIVER_API_CALL(cuMemcpyHtoD(dl, l, N * sizeof(int))); DRIVER_API_CALL(cuMemcpyHtoD(dr, r, N * sizeof(int))); void *args[6] = { &dl, &dr, &dp, &N, &iter1, &iter2 }; GPU_TEST_FOR(DRIVER_API_CALL(hipModuleLaunchKernel(function, blocks, 1, 1, threads, 1, 1, 0, 0, args, 0))); DRIVER_API_CALL(cuMemcpyDtoH(l, dl, N * sizeof(int))); DRIVER_API_CALL(cuMemcpyDtoH(r, dr, N * sizeof(int))); DRIVER_API_CALL(cuMemcpyDtoH(p, dp, N * sizeof(int))); DRIVER_API_CALL(hipFree(dl)); DRIVER_API_CALL(hipFree(dr)); DRIVER_API_CALL(hipFree(dp)); #ifdef OUTPUT #pragma omp 
critical { printf("Thread %d\n", omp_get_thread_num()); output(p, N); } #endif DRIVER_API_CALL(hipCtxSynchronize()); } DRIVER_API_CALL(hipModuleUnload(module)); DRIVER_API_CALL(hipCtxDestroy(context)); RUNTIME_API_CALL(hipDeviceSynchronize()); #ifdef USE_MPI MPI_Finalize(); #endif return 0; }
acf990a58cadee3f7be528111f33f7fb2be69275.cu
#include <cstdio> #include <omp.h> #include <cuda.h> #include <cuda_runtime.h> #ifdef USE_MPI #include <mpi.h> #endif #include "../utils/common.h" static size_t N = 1000; static size_t iter1 = 200; static size_t iter2 = 400; void init(int *p, size_t size) { for (size_t i = 0; i < size; ++i) { p[i] = i; } } void output(int *p, size_t size) { for (size_t i = 0; i < size; ++i) { printf("index %zu: %d\n", i, p[i]); } } int main(int argc, char *argv[]) { #ifdef USE_MPI int numtasks, rank; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &numtasks); MPI_Comm_rank(MPI_COMM_WORLD, &rank); printf("MPI task %d/%d\n", rank, numtasks); #endif // Init device CUdevice device; CUcontext context; CUmodule module; CUfunction function; int device_id = 0; if (argc > 1) { device_id = atoi(argv[1]); } cu_init_device(device_id, device, context); cu_load_module_function(module, "vecAdd.cubin", function, "vecAdd"); // Iter bias if (argc > 2) { iter1 = atoi(argv[2]); } if (argc > 3) { iter2 = atoi(argv[3]); } #pragma omp parallel { int l[N], r[N], p[N]; CUdeviceptr dl, dr, dp; init(l, N); init(r, N); size_t threads = 256; size_t blocks = (N - 1) / threads + 1; DRIVER_API_CALL(cuCtxSetCurrent(context)); DRIVER_API_CALL(cuMemAlloc(&dl, N * sizeof(int))); DRIVER_API_CALL(cuMemAlloc(&dr, N * sizeof(int))); DRIVER_API_CALL(cuMemAlloc(&dp, N * sizeof(int))); DRIVER_API_CALL(cuMemcpyHtoD(dl, l, N * sizeof(int))); DRIVER_API_CALL(cuMemcpyHtoD(dr, r, N * sizeof(int))); void *args[6] = { &dl, &dr, &dp, &N, &iter1, &iter2 }; GPU_TEST_FOR(DRIVER_API_CALL(cuLaunchKernel(function, blocks, 1, 1, threads, 1, 1, 0, 0, args, 0))); DRIVER_API_CALL(cuMemcpyDtoH(l, dl, N * sizeof(int))); DRIVER_API_CALL(cuMemcpyDtoH(r, dr, N * sizeof(int))); DRIVER_API_CALL(cuMemcpyDtoH(p, dp, N * sizeof(int))); DRIVER_API_CALL(cuMemFree(dl)); DRIVER_API_CALL(cuMemFree(dr)); DRIVER_API_CALL(cuMemFree(dp)); #ifdef OUTPUT #pragma omp critical { printf("Thread %d\n", omp_get_thread_num()); output(p, N); } #endif 
DRIVER_API_CALL(cuCtxSynchronize()); } DRIVER_API_CALL(cuModuleUnload(module)); DRIVER_API_CALL(cuCtxDestroy(context)); RUNTIME_API_CALL(cudaDeviceSynchronize()); #ifdef USE_MPI MPI_Finalize(); #endif return 0; }
b7be55041f3bc362d3c286e3ee45841e16a4947c.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017 H2O.ai, Inc. * License Apache License Version 2.0 (see LICENSE for details) */ #include "matrix/matrix.h" #include "matrix/matrix_dense.h" #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <iostream> #include "hip/hip_runtime.h" #include <cstdlib> #include <unistd.h> #include "h2o4gpukmeans.h" #include "kmeans_impl.h" #include "kmeans_general.h" #include "kmeans_h2o4gpu.h" #include <random> #include <algorithm> #include <vector> #include <set> #include <csignal> #include "../../common/utils.h" #include <math.h> /** * METHODS FOR DATA COPYING AND GENERATION */ template<typename T> void random_data(int verbose, thrust::device_vector<T> &array, int m, int n) { thrust::host_vector<T> host_array(m * n); for (int i = 0; i < m * n; i++) { host_array[i] = (T) rand() / (T) RAND_MAX; } array = host_array; } /** * Copies data from srcdata to array * @tparam T * @param verbose Logging level * @param ord Column on row order of data * @param array Destination array * @param srcdata Source data * @param q Shard number (from 0 to n_gpu) * @param n * @param npergpu * @param d */ template<typename T> void copy_data(int verbose, const char ord, thrust::device_vector<T> &array, const T *srcdata, int q, int n, int npergpu, int d) { if (ord == 'c') { thrust::host_vector<T> host_array(npergpu * d); log_debug(verbose, "Copy data COL ORDER -> ROW ORDER"); int indexi, indexj; for (int i = 0; i < npergpu * d; i++) { indexi = i % d; // col indexj = i / d + q * npergpu; // row (shifted by which gpu) host_array[i] = srcdata[indexi * n + indexj]; } array = host_array; } else { log_debug(verbose, "Copy data ROW ORDER not changed"); thrust::host_vector<T> host_array(srcdata + q * npergpu * d, srcdata + q * npergpu * d + npergpu * d); array = host_array; } } /** * Like copy_data but shuffles the data according to 
mapping from v * @tparam T * @param verbose * @param v * @param ord * @param array * @param srcdata * @param q * @param n * @param npergpu * @param d */ template<typename T> void copy_data_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array, const T *srcdata, int q, int n, int npergpu, int d) { thrust::host_vector<T> host_array(npergpu * d); if (ord == 'c') { log_debug(verbose, "Copy data shuffle COL ORDER -> ROW ORDER"); for (int i = 0; i < npergpu; i++) { for (int j = 0; j < d; j++) { host_array[i * d + j] = srcdata[v[q * npergpu + i] + j * n]; // shift by which gpu } } } else { log_debug(verbose, "Copy data shuffle ROW ORDER not changed"); for (int i = 0; i < npergpu; i++) { for (int j = 0; j < d; j++) { host_array[i * d + j] = srcdata[v[q * npergpu + i] * d + j]; // shift by which gpu } } } array = host_array; } template<typename T> void copy_centroids_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array, const T *srcdata, int n, int k, int d) { copy_data_shuffled(verbose, v, ord, array, srcdata, 0, n, k, d); } /** * Copies centroids from initial training set randomly. * @tparam T * @param verbose * @param seed * @param ord * @param array * @param srcdata * @param q * @param n * @param npergpu * @param d * @param k */ template<typename T> void random_centroids(int verbose, int seed, const char ord, thrust::device_vector<T> &array, const T *srcdata, int q, int n, int npergpu, int d, int k) { thrust::host_vector<T> host_array(k * d); if (seed < 0) { std::random_device rd; //Will be used to obtain a seed for the random number engine seed = rd(); } std::mt19937 gen(seed); std::uniform_int_distribution<> dis(0, n - 1); // random i in range from 0..n-1 (i.e. 
only 1 gpu gets centroids) if (ord == 'c') { log_debug(verbose, "Random centroids COL ORDER -> ROW ORDER"); for (int i = 0; i < k; i++) { // rows int reali = dis(gen); // + q*npergpu; // row sampled (called indexj above) for (int j = 0; j < d; j++) { // cols host_array[i * d + j] = srcdata[reali + j * n]; } } } else { log_debug(verbose, "Random centroids ROW ORDER not changed"); for (int i = 0; i < k; i++) { // rows int reali = dis(gen); // + q*npergpu ; // row sampled for (int j = 0; j < d; j++) { // cols host_array[i * d + j] = srcdata[reali * d + j]; } } } array = host_array; } /** * KMEANS METHODS FIT, PREDICT, TRANSFORM */ #define __HBAR__ \ "----------------------------------------------------------------------------\n" namespace h2o4gpukmeans { template<typename T> int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, T threshold, const T *srcdata, void **pred_centroids, void **pred_labels); template<typename T> int pick_point_idx_weighted( int seed, std::vector<T> *data, thrust::host_vector<T> weights) { T weighted_sum = 0; for(int i = 0; i < weights.size(); i++) { if(data) { weighted_sum += (data->data()[i] * weights.data()[i]); } else { weighted_sum += weights.data()[i]; } } T best_prob = 0.0; int best_prob_idx = 0; std::mt19937 mt(seed); std::uniform_real_distribution<> dist(0.0, 1.0); int i = 0; for(i = 0; i <= weights.size(); i++) { if(weights.size() == i) { break; } T prob_threshold = (T) dist(mt); T data_val = weights.data()[i]; if (data) { data_val *= data->data()[i]; } T prob_x = (data_val / weighted_sum); if(prob_x > prob_threshold) { break; } if (prob_x >= best_prob) { best_prob = prob_x; best_prob_idx = i; } } return weights.size() == i ? best_prob_idx : i; } /** * Copies cols records, starting at position idx*cols from data to centroids. Removes them afterwards from data. * Removes record from weights at position idx. 
* @tparam T * @param idx * @param cols * @param data * @param weights * @param centroids */ template<typename T> void add_centroid(int idx, int cols, thrust::host_vector<T> &data, thrust::host_vector<T> &weights, thrust::host_vector<T> &centroids) { for (int i = 0; i < cols; i++) { centroids.push_back(data[idx * cols + i]); } for (int i = cols - 1; i >= 0; i--) { data.erase(data.begin() + idx * cols + i); } weights.erase(weights.begin() + idx); } /** * K-Means++ algorithm * @tparam T * @param seed * @param data * @param weights * @param k * @param cols * @param centroids */ template<typename T> void kmeans_plus_plus( int verbose, int seed, thrust::host_vector<T> data, thrust::host_vector<T> weights, int k, int cols, thrust::host_vector<T> &centroids) { int centroid_idx = pick_point_idx_weighted( seed, (std::vector<T> *) NULL, weights ); add_centroid(centroid_idx, cols, data, weights, centroids); log_verbose(verbose, "KMeans++ - Allocating memory %d | %d | %d", data.size(), cols, centroids.size()); std::vector<T> best_pairwise_distances(data.size() / cols); // one for each row in data std::vector<T> std_data(data.begin(), data.end()); std::vector<T> std_centroids(centroids.begin(), centroids.end()); compute_distances(std_data, std_centroids, best_pairwise_distances, data.size() / cols, cols, 1); for (int iter = 0; iter < k - 1; iter++) { log_verbose(verbose, "KMeans++ - Iteraton %d/%d.", iter, k-1); centroid_idx = pick_point_idx_weighted( seed, &best_pairwise_distances, weights ); add_centroid(centroid_idx, cols, data, weights, centroids); best_pairwise_distances.erase(best_pairwise_distances.begin() + centroid_idx); // TODO necessary? 
std_data = std::vector<T>(data.begin(), data.end()); std_centroids = std::vector<T>(centroids.begin() + cols * (iter + 1), centroids.end()); int centroids_nr = std_centroids.size() / cols; std::vector<T> curr_pairwise_distances( centroids_nr * (std_data.size() / cols)); compute_distances(std_data, std_centroids, curr_pairwise_distances, std_data.size() / cols, cols, 1); for (int i = 0; i < curr_pairwise_distances.size(); i++) { best_pairwise_distances[i] = ::min(curr_pairwise_distances[i], best_pairwise_distances[i]); } } } template<typename T> struct min_calc_functor { T* all_costs_ptr; T* min_costs_ptr; T max = std::numeric_limits<T>::max(); int potential_k_rows; int rows_per_run; min_calc_functor(T* _all_costs_ptr, T* _min_costs_ptr, int _potential_k_rows, int _rows_per_run) { all_costs_ptr = _all_costs_ptr; min_costs_ptr = _min_costs_ptr; potential_k_rows = _potential_k_rows; rows_per_run = _rows_per_run; } __host__ __device__ void operator()(int idx) const { T best = max; for (int j = 0; j < potential_k_rows; j++) { best = min(best, std::abs(all_costs_ptr[j * rows_per_run + idx])); } min_costs_ptr[idx] = min(min_costs_ptr[idx], best); } }; /** * K-Means|| initialization method implementation as described in "Scalable K-Means++". * * This is a probabilistic method, which tries to choose points as much spread out as possible as centroids. * * In case it finds more than k centroids a K-Means++ algorithm is ran on potential centroids to pick k best suited ones. 
* * http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf * * @tparam T * @param verbose * @param seed * @param ord * @param data * @param data_dots * @param centroids * @param rows * @param cols * @param k * @param num_gpu * @param threshold */ template<typename T> thrust::host_vector<T> kmeans_parallel(int verbose, int seed, const char ord, thrust::device_vector<T> **data, thrust::device_vector<T> **data_dots, size_t rows, int cols, int k, int num_gpu, T threshold) { if (seed < 0) { std::random_device rd; int seed = rd(); } size_t rows_per_gpu = rows / num_gpu; std::mt19937 gen(seed); std::uniform_int_distribution<> dis(0, rows - 1); // Find the position (GPU idx and idx on that GPU) of the initial centroid int first_center = dis(gen); int first_center_idx = first_center % rows_per_gpu; int first_center_gpu = first_center / rows_per_gpu; log_verbose(verbose, "KMeans|| - Initial centroid %d on GPU %d.", first_center_idx, first_center_gpu); // Copies the initial centroid to potential centroids vector. That vector will store all potential centroids found // in the previous iteration. thrust::host_vector<T> h_potential_centroids(cols); std::vector<thrust::host_vector<T>> h_potential_centroids_per_gpu(num_gpu); CUDACHECK(hipSetDevice(first_center_gpu)); thrust::copy( (*data[first_center_gpu]).begin() + first_center_idx * cols, (*data[first_center_gpu]).begin() + (first_center_idx + 1) * cols, h_potential_centroids.begin() ); thrust::host_vector<T> h_all_potential_centroids = h_potential_centroids; // Initial the cost-to-potential-centroids and cost-to-closest-potential-centroid matrices. 
Initial cost is +infinity std::vector<thrust::device_vector<T>> d_min_costs(num_gpu); for (int q = 0; q < num_gpu; q++) { CUDACHECK(hipSetDevice(q)); d_min_costs[q].resize(rows_per_gpu); thrust::fill(d_min_costs[q].begin(), d_min_costs[q].end(), std::numeric_limits<T>::max()); } double t0 = timer<double>(); // The original white paper claims 8 should be enough int max_iter = ::min(8, (int)(2 + log(k)) ); for (int counter = 0; counter < max_iter; counter++) { log_verbose(verbose, "KMeans|| - Iteration %d.", counter); T total_min_cost = 0.0; int new_potential_centroids = 0; #pragma omp parallel for for (int i = 0; i < num_gpu; i++) { CUDACHECK(hipSetDevice(i)); thrust::device_vector<T> d_potential_centroids = h_potential_centroids; int potential_k_rows = d_potential_centroids.size() / cols; // Compute all the costs to each potential centroid from previous iteration thrust::device_vector<T> centroid_dots(potential_k_rows); kmeans::detail::batch_calculate_distances(verbose, 0, rows_per_gpu, cols, potential_k_rows, *data[i], d_potential_centroids, *data_dots[i], centroid_dots, [&](int rows_per_run, size_t offset, thrust::device_vector<T> &pairwise_distances) { // Find the closest potential center cost for each row auto min_cost_counter = thrust::make_counting_iterator(0); auto all_costs_ptr = thrust::raw_pointer_cast(pairwise_distances.data()); auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data() + offset); thrust::for_each(min_cost_counter, min_cost_counter + rows_per_run, // Functor instead of a lambda b/c nvcc is complaining about // nesting a __device__ lambda inside a regular lambda min_calc_functor<T>(all_costs_ptr, min_costs_ptr, potential_k_rows, rows_per_run)); } ); } for (int i = 0; i < num_gpu; i++) { CUDACHECK(hipSetDevice(i)); total_min_cost += thrust::reduce( d_min_costs[i].begin(), d_min_costs[i].end() ); } log_verbose(verbose, "KMeans|| - Total min cost from centers %g.", total_min_cost); if(total_min_cost == (T) 0.0) { continue; } 
std::set<int> copy_from_gpus; #pragma omp parallel for for (int i = 0; i < num_gpu; i++) { CUDACHECK(hipSetDevice(i)); // Count how many potential centroids there are using probabilities // The further the row is from the closest cluster center the higher the probability auto pot_cent_filter_counter = thrust::make_counting_iterator(0); auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data()); int pot_cent_num = thrust::count_if( pot_cent_filter_counter, pot_cent_filter_counter + rows_per_gpu, [=]__device__(int idx){ thrust::default_random_engine rng(seed); thrust::uniform_real_distribution<> dist(0.0, 1.0); int device; hipGetDevice(&device); rng.discard(idx + device * rows_per_gpu); T prob_threshold = (T) dist(rng); T prob_x = (( 2.0 * k * min_costs_ptr[idx]) / total_min_cost); return prob_x > prob_threshold; } ); log_debug(verbose, "KMeans|| - Potential centroids on GPU %d = %d.", i, pot_cent_num); if (pot_cent_num > 0) { copy_from_gpus.insert(i); // Copy all potential cluster centers thrust::device_vector<T> d_new_potential_centroids(pot_cent_num * cols); auto range = thrust::make_counting_iterator(0); thrust::copy_if( (*data[i]).begin(), (*data[i]).end(), range, d_new_potential_centroids.begin(), [=] __device__(int idx){ int row = idx / cols; thrust::default_random_engine rng(seed); thrust::uniform_real_distribution<> dist(0.0, 1.0); int device; hipGetDevice(&device); rng.discard(row + device * rows_per_gpu); T prob_threshold = (T) dist(rng); T prob_x = (( 2.0 * k * min_costs_ptr[row]) / total_min_cost); return prob_x > prob_threshold; }); h_potential_centroids_per_gpu[i].clear(); h_potential_centroids_per_gpu[i].resize(d_new_potential_centroids.size()); new_potential_centroids += d_new_potential_centroids.size(); thrust::copy( d_new_potential_centroids.begin(), d_new_potential_centroids.end(), h_potential_centroids_per_gpu[i].begin() ); } } log_verbose(verbose, "KMeans|| - New potential centroids %d.", new_potential_centroids); // Gather potential 
cluster centers from all GPUs if (new_potential_centroids > 0) { h_potential_centroids.clear(); h_potential_centroids.resize(new_potential_centroids); int old_pot_centroids_size = h_all_potential_centroids.size(); h_all_potential_centroids.resize(old_pot_centroids_size + new_potential_centroids); int offset = 0; for (int i = 0; i < num_gpu; i++) { if(copy_from_gpus.find(i) != copy_from_gpus.end()) { thrust::copy( h_potential_centroids_per_gpu[i].begin(), h_potential_centroids_per_gpu[i].end(), h_potential_centroids.begin() + offset ); offset += h_potential_centroids_per_gpu[i].size(); } } thrust::copy( h_potential_centroids.begin(), h_potential_centroids.end(), h_all_potential_centroids.begin() + old_pot_centroids_size ); } } double timeloop = static_cast<double>(timer<double>() - t0); thrust::host_vector<T> final_centroids(0); int potential_centroids_num = h_all_potential_centroids.size() / cols; if (potential_centroids_num <= k) { final_centroids.resize(k * cols); thrust::copy( h_all_potential_centroids.begin(), h_all_potential_centroids.end(), final_centroids.begin() ); // TODO what if potential_centroids_num < k ?? we don't want 0s } else { // If we found more than k potential cluster centers we need to take only a subset // This is done using a weighted k-means++ method, since the set should be very small // it should converge very fast and is all done on the CPU. 
thrust::host_vector<T> weights(potential_centroids_num); double tc0 = timer<double>(); // Weights correspond to the number of data points assigned to each potential cluster center count_pts_per_centroid( verbose, num_gpu, rows_per_gpu, cols, data, data_dots, h_all_potential_centroids, weights ); double timecount = static_cast<double>(timer<double>() - tc0); double tkpp = timer<double>(); kmeans_plus_plus( verbose, seed, h_all_potential_centroids, weights, k, cols, final_centroids ); double timekpp = static_cast<double>(timer<double>() - tkpp); log_verbose(verbose, "KMeans|| - Time loop: %g Time count: %g Time kpp: %g.", timeloop, timecount, timekpp); } return final_centroids; } volatile std::atomic_int flaggpu(0); inline void my_function_gpu(int sig) { // can be called asynchronously fprintf(stderr, "Caught signal %d. Terminating shortly.\n", sig); flaggpu = 1; } std::vector<int> kmeans_init(int verbose, int *final_n_gpu, int n_gputry, int gpu_idtry, int rows) { if (rows > std::numeric_limits<int>::max()) { fprintf(stderr, "rows > %d not implemented\n", std::numeric_limits<int>::max()); fflush(stderr); exit(0); } std::signal(SIGINT, my_function_gpu); std::signal(SIGTERM, my_function_gpu); // no more gpus than visible gpus int n_gpuvis; hipGetDeviceCount(&n_gpuvis); int n_gpu = ::min(n_gpuvis, n_gputry); // no more than rows n_gpu = ::min(n_gpu, rows); if (verbose) { std::cout << n_gpu << " gpus." 
<< std::endl; } int gpu_id = gpu_idtry % n_gpuvis; // setup GPU list to use std::vector<int> dList(n_gpu); for (int idx = 0; idx < n_gpu; idx++) { int device_idx = (gpu_id + idx) % n_gpuvis; dList[idx] = device_idx; } *final_n_gpu = n_gpu; return dList; } template<typename T> H2O4GPUKMeans<T>::H2O4GPUKMeans(const T *A, int k, int n, int d) { _A = A; _k = k; _n = n; _d = d; } template<typename T> int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, T threshold, const T *srcdata, void **pred_centroids, void **pred_labels) { // init random seed if use the C function rand() if (seed >= 0) { srand(seed); } else { srand(unsigned(time(NULL))); } // no more clusters than rows if (k > rows) { k = static_cast<int>(rows); fprintf(stderr, "Number of clusters adjusted to be equal to number of rows.\n"); fflush(stderr); } int n_gpu; std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows); double t0t = timer<double>(); thrust::device_vector<T> *data[n_gpu]; thrust::device_vector<int> *labels[n_gpu]; thrust::device_vector<T> *d_centroids[n_gpu]; thrust::device_vector<T> *data_dots[n_gpu]; for (int q = 0; q < n_gpu; q++) { CUDACHECK(hipSetDevice(dList[q])); data[q] = new thrust::device_vector<T>(rows / n_gpu * cols); d_centroids[q] = new thrust::device_vector<T>(k * cols); data_dots[q] = new thrust::device_vector<T>(rows / n_gpu); kmeans::detail::labels_init(); } log_debug(verbose, "Number of points: %d", rows); log_debug(verbose, "Number of dimensions: %d", cols); log_debug(verbose, "Number of clusters: %d", k); log_debug(verbose, "Max. number of iterations: %d", max_iterations); log_debug(verbose, "Stopping threshold: %d", threshold); std::vector<int> v(rows); std::iota(std::begin(v), std::end(v), 0); // Fill with 0, 1, ..., rows. 
if (seed >= 0) { std::shuffle(v.begin(), v.end(), std::default_random_engine(seed)); } else { std::random_shuffle(v.begin(), v.end()); } // Copy the data to devices for (int q = 0; q < n_gpu; q++) { CUDACHECK(hipSetDevice(dList[q])); if (verbose) { std::cout << "Copying data to device: " << dList[q] << std::endl; } copy_data(verbose, ord, *data[q], &srcdata[0], q, rows, rows / n_gpu, cols); // Pre-compute the data matrix norms kmeans::detail::make_self_dots(rows / n_gpu, cols, *data[q], *data_dots[q]); } // Get random points as centroids int bytecount = cols * k * sizeof(T); // all centroids if (0 == init_from_data) { log_debug(verbose, "KMeans - Using random initialization."); int masterq = 0; CUDACHECK(hipSetDevice(dList[masterq])); copy_centroids_shuffled(verbose, v, ord, *d_centroids[masterq], &srcdata[0], rows, k, cols); // Copy centroids to all devices std::vector < hipStream_t * > streams; streams.resize(n_gpu); for (int q = 0; q < n_gpu; q++) { if (q == masterq) continue; CUDACHECK(hipSetDevice(dList[q])); if (verbose > 0) { std::cout << "Copying centroid data to device: " << dList[q] << std::endl; } streams[q] = reinterpret_cast<hipStream_t *>(malloc(sizeof(hipStream_t))); hipStreamCreate(streams[q]); hipMemcpyPeerAsync(thrust::raw_pointer_cast(&(*d_centroids[q])[0]), dList[q], thrust::raw_pointer_cast(&(*d_centroids[masterq])[0]), dList[masterq], bytecount, *(streams[q])); } for (int q = 0; q < n_gpu; q++) { if (q == masterq) continue; hipSetDevice(dList[q]); hipStreamDestroy(*(streams[q])); #if(DEBUGKMEANS) thrust::host_vector<T> h_centroidq=*d_centroids[q]; for(int ii=0;ii<k*d;ii++){ fprintf(stderr,"q=%d initcent[%d]=%g\n",q,ii,h_centroidq[ii]); fflush(stderr); } #endif } } else if (1 == init_from_data) { // kmeans|| log_debug(verbose, "KMeans - Using K-Means|| initialization."); thrust::host_vector<T> final_centroids = kmeans_parallel(verbose, seed, ord, data, data_dots, rows, cols, k, n_gpu, threshold); for (int q = 0; q < n_gpu; q++) { 
CUDACHECK(hipSetDevice(dList[q])); hipMemcpy( thrust::raw_pointer_cast(&(*d_centroids[q])[0]), thrust::raw_pointer_cast(&final_centroids[0]), bytecount, hipMemcpyHostToDevice); } } for (int q = 0; q < n_gpu; q++) { CUDACHECK(hipSetDevice(dList[q])); labels[q] = new thrust::device_vector<int>(rows / n_gpu); } double timetransfer = static_cast<double>(timer<double>() - t0t); double t0 = timer<double>(); int iter = kmeans::kmeans<T>(verbose, &flaggpu, rows, cols, k, data, labels, d_centroids, data_dots, dList, n_gpu, max_iterations, threshold, true); if (iter < 0) { log_error(verbose, "KMeans algorithm failed."); return iter; } double timefit = static_cast<double>(timer<double>() - t0); double t1 = timer<double>(); // copy result of centroids (sitting entirely on each device) back to host thrust::host_vector<T> *ctr = new thrust::host_vector<T>(*d_centroids[0]); // TODO FIXME: When do delete this ctr memory? // hipMemcpy(ctr->data().get(), centroids[0]->data().get(), sizeof(T)*k*d, hipMemcpyDeviceToHost); *pred_centroids = ctr->data(); // copy assigned labels thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(rows); int offset = 0; for (int q = 0; q < n_gpu; q++) { h_labels->insert(h_labels->begin() + offset, labels[q]->begin(), labels[q]->end()); offset += labels[q]->size(); } *pred_labels = h_labels->data(); // debug if (verbose >= H2O4GPU_LOG_VERBOSE) { for (unsigned int ii = 0; ii < k; ii++) { fprintf(stderr, "ii=%d of k=%d ", ii, k); for (unsigned int jj = 0; jj < cols; jj++) { fprintf(stderr, "%g ", (*ctr)[cols * ii + jj]); } fprintf(stderr, "\n"); fflush(stderr); } } for (int q = 0; q < n_gpu; q++) { CUDACHECK(hipSetDevice(dList[q])); delete (data[q]); delete (labels[q]); delete (d_centroids[q]); delete (data_dots[q]); kmeans::detail::labels_close(); } double timecleanup = static_cast<double>(timer<double>() - t1); if (verbose) { std::cout << " Time fit: " << timefit << " s" << std::endl; fprintf(stderr, "Timetransfer: %g Timefit: %g Timecleanup: 
%g\n", timetransfer, timefit, timecleanup); fflush(stderr); } return 0; } template<typename T> int kmeans_predict(int verbose, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, const T *srcdata, const T *centroids, void **pred_labels) { // Print centroids if (verbose >= H2O4GPU_LOG_VERBOSE) { std::cout << std::endl; for (int i = 0; i < cols * k; i++) { std::cout << centroids[i] << " "; if (i % cols == 1) { std::cout << std::endl; } } } int n_gpu; std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows); thrust::device_vector<T> *d_data[n_gpu]; thrust::device_vector<T> *d_centroids[n_gpu]; thrust::device_vector<T> *data_dots[n_gpu]; thrust::device_vector<T> *centroid_dots[n_gpu]; thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(0); for (int q = 0; q < n_gpu; q++) { CUDACHECK(hipSetDevice(dList[q])); kmeans::detail::labels_init(); data_dots[q] = new thrust::device_vector<T>(rows / n_gpu); centroid_dots[q] = new thrust::device_vector<T>(k); d_centroids[q] = new thrust::device_vector<T>(k * cols); d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols); copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols); copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols); kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]); thrust::device_vector<int> d_labels(rows / n_gpu); kmeans::detail::batch_calculate_distances(verbose, q, rows / n_gpu, cols, k, *d_data[q], *d_centroids[q], *data_dots[q], *centroid_dots[q], [&](int n, size_t offset, thrust::device_vector<T> &pairwise_distances) { kmeans::detail::relabel(n, k, pairwise_distances, d_labels, offset); } ); h_labels->insert(h_labels->end(), d_labels.begin(), d_labels.end()); } *pred_labels = h_labels->data(); for (int q = 0; q < n_gpu; q++) { safe_cuda(hipSetDevice(dList[q])); kmeans::detail::labels_close(); delete (data_dots[q]); delete (centroid_dots[q]); delete (d_centroids[q]); delete 
(d_data[q]); } return 0; } template<typename T> int kmeans_transform(int verbose, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, const T *srcdata, const T *centroids, void **preds) { // Print centroids if (verbose >= H2O4GPU_LOG_VERBOSE) { std::cout << std::endl; for (int i = 0; i < cols * k; i++) { std::cout << centroids[i] << " "; if (i % cols == 1) { std::cout << std::endl; } } } int n_gpu; std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows); thrust::device_vector<T> *d_data[n_gpu]; thrust::device_vector<T> *d_centroids[n_gpu]; thrust::device_vector<T> *d_pairwise_distances[n_gpu]; thrust::device_vector<T> *data_dots[n_gpu]; thrust::device_vector<T> *centroid_dots[n_gpu]; for (int q = 0; q < n_gpu; q++) { CUDACHECK(hipSetDevice(dList[q])); kmeans::detail::labels_init(); data_dots[q] = new thrust::device_vector<T>(rows / n_gpu); centroid_dots[q] = new thrust::device_vector<T>(k); d_pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k); d_centroids[q] = new thrust::device_vector<T>(k * cols); d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols); copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols); copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols); kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]); // TODO batch this kmeans::detail::calculate_distances(verbose, q, rows / n_gpu, cols, k, *d_data[q], 0, *d_centroids[q], *data_dots[q], *centroid_dots[q], *d_pairwise_distances[q]); } // Move the resulting labels into host memory from all devices thrust::host_vector<T> *h_pairwise_distances = new thrust::host_vector<T>(0); for (int q = 0; q < n_gpu; q++) { h_pairwise_distances->insert(h_pairwise_distances->end(), d_pairwise_distances[q]->begin(), d_pairwise_distances[q]->end()); } *preds = h_pairwise_distances->data(); // Print centroids if (verbose >= H2O4GPU_LOG_VERBOSE) { std::cout << std::endl; for (int i = 
0; i < rows * cols; i++) { std::cout << h_pairwise_distances->data()[i] << " "; if (i % cols == 1) { std::cout << std::endl; } } } for (int q = 0; q < n_gpu; q++) { safe_cuda(hipSetDevice(dList[q])); kmeans::detail::labels_close(); delete (d_pairwise_distances[q]); delete (data_dots[q]); delete (centroid_dots[q]); delete (d_centroids[q]); delete (d_data[q]); } return 0; } template<typename T> int makePtr_dense(int dopredict, int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, T threshold, const T *srcdata, const T *centroids, void **pred_centroids, void **pred_labels) { if (dopredict == 0) { return kmeans_fit(verbose, seed, gpu_idtry, n_gputry, rows, cols, ord, k, max_iterations, init_from_data, threshold, srcdata, pred_centroids, pred_labels); } else { return kmeans_predict(verbose, gpu_idtry, n_gputry, rows, cols, ord, k, srcdata, centroids, pred_labels); } } template int makePtr_dense<float>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, float threshold, const float *srcdata, const float *centroids, void **pred_centroids, void **pred_labels); template int makePtr_dense<double>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, double threshold, const double *srcdata, const double *centroids, void **pred_centroids, void **pred_labels); template int kmeans_fit<float>(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, float threshold, const float *srcdata, void **pred_centroids, void **pred_labels); template int kmeans_fit<double>(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, double threshold, 
const double *srcdata, void **pred_centroids, void **pred_labels); template int kmeans_predict<float>(int verbose, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, const float *srcdata, const float *centroids, void **pred_labels); template int kmeans_predict<double>(int verbose, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, const double *srcdata, const double *centroids, void **pred_labels); template int kmeans_transform<float>(int verbose, int gpu_id, int n_gpu, size_t m, size_t n, const char ord, int k, const float *src_data, const float *centroids, void **preds); template int kmeans_transform<double>(int verbose, int gpu_id, int n_gpu, size_t m, size_t n, const char ord, int k, const double *src_data, const double *centroids, void **preds); // Explicit template instantiation. #if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE == 1 template class H2O4GPUKMeans<double>; #endif #if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE == 1 template class H2O4GPUKMeans<float>; #endif } // namespace h2o4gpukmeans #ifdef __cplusplus extern "C" { #endif /* * Interface for other languages */ // Fit and Predict int make_ptr_float_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n, const char ord, int k, int max_iterations, int init_from_data, float threshold, const float *srcdata, const float *centroids, void **pred_centroids, void **pred_labels) { return h2o4gpukmeans::makePtr_dense<float>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k, max_iterations, init_from_data, threshold, srcdata, centroids, pred_centroids, pred_labels); } int make_ptr_double_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n, const char ord, int k, int max_iterations, int init_from_data, double threshold, const double *srcdata, const double *centroids, void **pred_centroids, void **pred_labels) { return h2o4gpukmeans::makePtr_dense<double>(dopredict, 
verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k, max_iterations, init_from_data, threshold, srcdata, centroids, pred_centroids, pred_labels); } // Transform int kmeans_transform_float(int verbose, int gpu_id, int n_gpu, size_t m, size_t n, const char ord, int k, const float *src_data, const float *centroids, void **preds) { return h2o4gpukmeans::kmeans_transform<float>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds); } int kmeans_transform_double(int verbose, int gpu_id, int n_gpu, size_t m, size_t n, const char ord, int k, const double *src_data, const double *centroids, void **preds) { return h2o4gpukmeans::kmeans_transform<double>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds); } #ifdef __cplusplus } #endif
b7be55041f3bc362d3c286e3ee45841e16a4947c.cu
/*! * Copyright 2017 H2O.ai, Inc. * License Apache License Version 2.0 (see LICENSE for details) */ #include "matrix/matrix.h" #include "matrix/matrix_dense.h" #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <iostream> #include "cuda.h" #include <cstdlib> #include <unistd.h> #include "h2o4gpukmeans.h" #include "kmeans_impl.h" #include "kmeans_general.h" #include "kmeans_h2o4gpu.h" #include <random> #include <algorithm> #include <vector> #include <set> #include <csignal> #include "../../common/utils.h" #include <math.h> /** * METHODS FOR DATA COPYING AND GENERATION */ template<typename T> void random_data(int verbose, thrust::device_vector<T> &array, int m, int n) { thrust::host_vector<T> host_array(m * n); for (int i = 0; i < m * n; i++) { host_array[i] = (T) rand() / (T) RAND_MAX; } array = host_array; } /** * Copies data from srcdata to array * @tparam T * @param verbose Logging level * @param ord Column on row order of data * @param array Destination array * @param srcdata Source data * @param q Shard number (from 0 to n_gpu) * @param n * @param npergpu * @param d */ template<typename T> void copy_data(int verbose, const char ord, thrust::device_vector<T> &array, const T *srcdata, int q, int n, int npergpu, int d) { if (ord == 'c') { thrust::host_vector<T> host_array(npergpu * d); log_debug(verbose, "Copy data COL ORDER -> ROW ORDER"); int indexi, indexj; for (int i = 0; i < npergpu * d; i++) { indexi = i % d; // col indexj = i / d + q * npergpu; // row (shifted by which gpu) host_array[i] = srcdata[indexi * n + indexj]; } array = host_array; } else { log_debug(verbose, "Copy data ROW ORDER not changed"); thrust::host_vector<T> host_array(srcdata + q * npergpu * d, srcdata + q * npergpu * d + npergpu * d); array = host_array; } } /** * Like copy_data but shuffles the data according to mapping from v * @tparam T * @param verbose * @param v * @param ord * 
@param array * @param srcdata * @param q * @param n * @param npergpu * @param d */ template<typename T> void copy_data_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array, const T *srcdata, int q, int n, int npergpu, int d) { thrust::host_vector<T> host_array(npergpu * d); if (ord == 'c') { log_debug(verbose, "Copy data shuffle COL ORDER -> ROW ORDER"); for (int i = 0; i < npergpu; i++) { for (int j = 0; j < d; j++) { host_array[i * d + j] = srcdata[v[q * npergpu + i] + j * n]; // shift by which gpu } } } else { log_debug(verbose, "Copy data shuffle ROW ORDER not changed"); for (int i = 0; i < npergpu; i++) { for (int j = 0; j < d; j++) { host_array[i * d + j] = srcdata[v[q * npergpu + i] * d + j]; // shift by which gpu } } } array = host_array; } template<typename T> void copy_centroids_shuffled(int verbose, std::vector<int> v, const char ord, thrust::device_vector<T> &array, const T *srcdata, int n, int k, int d) { copy_data_shuffled(verbose, v, ord, array, srcdata, 0, n, k, d); } /** * Copies centroids from initial training set randomly. * @tparam T * @param verbose * @param seed * @param ord * @param array * @param srcdata * @param q * @param n * @param npergpu * @param d * @param k */ template<typename T> void random_centroids(int verbose, int seed, const char ord, thrust::device_vector<T> &array, const T *srcdata, int q, int n, int npergpu, int d, int k) { thrust::host_vector<T> host_array(k * d); if (seed < 0) { std::random_device rd; //Will be used to obtain a seed for the random number engine seed = rd(); } std::mt19937 gen(seed); std::uniform_int_distribution<> dis(0, n - 1); // random i in range from 0..n-1 (i.e. 
only 1 gpu gets centroids) if (ord == 'c') { log_debug(verbose, "Random centroids COL ORDER -> ROW ORDER"); for (int i = 0; i < k; i++) { // rows int reali = dis(gen); // + q*npergpu; // row sampled (called indexj above) for (int j = 0; j < d; j++) { // cols host_array[i * d + j] = srcdata[reali + j * n]; } } } else { log_debug(verbose, "Random centroids ROW ORDER not changed"); for (int i = 0; i < k; i++) { // rows int reali = dis(gen); // + q*npergpu ; // row sampled for (int j = 0; j < d; j++) { // cols host_array[i * d + j] = srcdata[reali * d + j]; } } } array = host_array; } /** * KMEANS METHODS FIT, PREDICT, TRANSFORM */ #define __HBAR__ \ "----------------------------------------------------------------------------\n" namespace h2o4gpukmeans { template<typename T> int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, T threshold, const T *srcdata, void **pred_centroids, void **pred_labels); template<typename T> int pick_point_idx_weighted( int seed, std::vector<T> *data, thrust::host_vector<T> weights) { T weighted_sum = 0; for(int i = 0; i < weights.size(); i++) { if(data) { weighted_sum += (data->data()[i] * weights.data()[i]); } else { weighted_sum += weights.data()[i]; } } T best_prob = 0.0; int best_prob_idx = 0; std::mt19937 mt(seed); std::uniform_real_distribution<> dist(0.0, 1.0); int i = 0; for(i = 0; i <= weights.size(); i++) { if(weights.size() == i) { break; } T prob_threshold = (T) dist(mt); T data_val = weights.data()[i]; if (data) { data_val *= data->data()[i]; } T prob_x = (data_val / weighted_sum); if(prob_x > prob_threshold) { break; } if (prob_x >= best_prob) { best_prob = prob_x; best_prob_idx = i; } } return weights.size() == i ? best_prob_idx : i; } /** * Copies cols records, starting at position idx*cols from data to centroids. Removes them afterwards from data. * Removes record from weights at position idx. 
* @tparam T * @param idx * @param cols * @param data * @param weights * @param centroids */ template<typename T> void add_centroid(int idx, int cols, thrust::host_vector<T> &data, thrust::host_vector<T> &weights, thrust::host_vector<T> &centroids) { for (int i = 0; i < cols; i++) { centroids.push_back(data[idx * cols + i]); } for (int i = cols - 1; i >= 0; i--) { data.erase(data.begin() + idx * cols + i); } weights.erase(weights.begin() + idx); } /** * K-Means++ algorithm * @tparam T * @param seed * @param data * @param weights * @param k * @param cols * @param centroids */ template<typename T> void kmeans_plus_plus( int verbose, int seed, thrust::host_vector<T> data, thrust::host_vector<T> weights, int k, int cols, thrust::host_vector<T> &centroids) { int centroid_idx = pick_point_idx_weighted( seed, (std::vector<T> *) NULL, weights ); add_centroid(centroid_idx, cols, data, weights, centroids); log_verbose(verbose, "KMeans++ - Allocating memory %d | %d | %d", data.size(), cols, centroids.size()); std::vector<T> best_pairwise_distances(data.size() / cols); // one for each row in data std::vector<T> std_data(data.begin(), data.end()); std::vector<T> std_centroids(centroids.begin(), centroids.end()); compute_distances(std_data, std_centroids, best_pairwise_distances, data.size() / cols, cols, 1); for (int iter = 0; iter < k - 1; iter++) { log_verbose(verbose, "KMeans++ - Iteraton %d/%d.", iter, k-1); centroid_idx = pick_point_idx_weighted( seed, &best_pairwise_distances, weights ); add_centroid(centroid_idx, cols, data, weights, centroids); best_pairwise_distances.erase(best_pairwise_distances.begin() + centroid_idx); // TODO necessary? 
std_data = std::vector<T>(data.begin(), data.end()); std_centroids = std::vector<T>(centroids.begin() + cols * (iter + 1), centroids.end()); int centroids_nr = std_centroids.size() / cols; std::vector<T> curr_pairwise_distances( centroids_nr * (std_data.size() / cols)); compute_distances(std_data, std_centroids, curr_pairwise_distances, std_data.size() / cols, cols, 1); for (int i = 0; i < curr_pairwise_distances.size(); i++) { best_pairwise_distances[i] = std::min(curr_pairwise_distances[i], best_pairwise_distances[i]); } } } template<typename T> struct min_calc_functor { T* all_costs_ptr; T* min_costs_ptr; T max = std::numeric_limits<T>::max(); int potential_k_rows; int rows_per_run; min_calc_functor(T* _all_costs_ptr, T* _min_costs_ptr, int _potential_k_rows, int _rows_per_run) { all_costs_ptr = _all_costs_ptr; min_costs_ptr = _min_costs_ptr; potential_k_rows = _potential_k_rows; rows_per_run = _rows_per_run; } __host__ __device__ void operator()(int idx) const { T best = max; for (int j = 0; j < potential_k_rows; j++) { best = min(best, std::abs(all_costs_ptr[j * rows_per_run + idx])); } min_costs_ptr[idx] = min(min_costs_ptr[idx], best); } }; /** * K-Means|| initialization method implementation as described in "Scalable K-Means++". * * This is a probabilistic method, which tries to choose points as much spread out as possible as centroids. * * In case it finds more than k centroids a K-Means++ algorithm is ran on potential centroids to pick k best suited ones. 
* * http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf * * @tparam T * @param verbose * @param seed * @param ord * @param data * @param data_dots * @param centroids * @param rows * @param cols * @param k * @param num_gpu * @param threshold */ template<typename T> thrust::host_vector<T> kmeans_parallel(int verbose, int seed, const char ord, thrust::device_vector<T> **data, thrust::device_vector<T> **data_dots, size_t rows, int cols, int k, int num_gpu, T threshold) { if (seed < 0) { std::random_device rd; int seed = rd(); } size_t rows_per_gpu = rows / num_gpu; std::mt19937 gen(seed); std::uniform_int_distribution<> dis(0, rows - 1); // Find the position (GPU idx and idx on that GPU) of the initial centroid int first_center = dis(gen); int first_center_idx = first_center % rows_per_gpu; int first_center_gpu = first_center / rows_per_gpu; log_verbose(verbose, "KMeans|| - Initial centroid %d on GPU %d.", first_center_idx, first_center_gpu); // Copies the initial centroid to potential centroids vector. That vector will store all potential centroids found // in the previous iteration. thrust::host_vector<T> h_potential_centroids(cols); std::vector<thrust::host_vector<T>> h_potential_centroids_per_gpu(num_gpu); CUDACHECK(cudaSetDevice(first_center_gpu)); thrust::copy( (*data[first_center_gpu]).begin() + first_center_idx * cols, (*data[first_center_gpu]).begin() + (first_center_idx + 1) * cols, h_potential_centroids.begin() ); thrust::host_vector<T> h_all_potential_centroids = h_potential_centroids; // Initial the cost-to-potential-centroids and cost-to-closest-potential-centroid matrices. 
Initial cost is +infinity std::vector<thrust::device_vector<T>> d_min_costs(num_gpu); for (int q = 0; q < num_gpu; q++) { CUDACHECK(cudaSetDevice(q)); d_min_costs[q].resize(rows_per_gpu); thrust::fill(d_min_costs[q].begin(), d_min_costs[q].end(), std::numeric_limits<T>::max()); } double t0 = timer<double>(); // The original white paper claims 8 should be enough int max_iter = std::min(8, (int)(2 + log(k)) ); for (int counter = 0; counter < max_iter; counter++) { log_verbose(verbose, "KMeans|| - Iteration %d.", counter); T total_min_cost = 0.0; int new_potential_centroids = 0; #pragma omp parallel for for (int i = 0; i < num_gpu; i++) { CUDACHECK(cudaSetDevice(i)); thrust::device_vector<T> d_potential_centroids = h_potential_centroids; int potential_k_rows = d_potential_centroids.size() / cols; // Compute all the costs to each potential centroid from previous iteration thrust::device_vector<T> centroid_dots(potential_k_rows); kmeans::detail::batch_calculate_distances(verbose, 0, rows_per_gpu, cols, potential_k_rows, *data[i], d_potential_centroids, *data_dots[i], centroid_dots, [&](int rows_per_run, size_t offset, thrust::device_vector<T> &pairwise_distances) { // Find the closest potential center cost for each row auto min_cost_counter = thrust::make_counting_iterator(0); auto all_costs_ptr = thrust::raw_pointer_cast(pairwise_distances.data()); auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data() + offset); thrust::for_each(min_cost_counter, min_cost_counter + rows_per_run, // Functor instead of a lambda b/c nvcc is complaining about // nesting a __device__ lambda inside a regular lambda min_calc_functor<T>(all_costs_ptr, min_costs_ptr, potential_k_rows, rows_per_run)); } ); } for (int i = 0; i < num_gpu; i++) { CUDACHECK(cudaSetDevice(i)); total_min_cost += thrust::reduce( d_min_costs[i].begin(), d_min_costs[i].end() ); } log_verbose(verbose, "KMeans|| - Total min cost from centers %g.", total_min_cost); if(total_min_cost == (T) 0.0) { continue; } 
std::set<int> copy_from_gpus; #pragma omp parallel for for (int i = 0; i < num_gpu; i++) { CUDACHECK(cudaSetDevice(i)); // Count how many potential centroids there are using probabilities // The further the row is from the closest cluster center the higher the probability auto pot_cent_filter_counter = thrust::make_counting_iterator(0); auto min_costs_ptr = thrust::raw_pointer_cast(d_min_costs[i].data()); int pot_cent_num = thrust::count_if( pot_cent_filter_counter, pot_cent_filter_counter + rows_per_gpu, [=]__device__(int idx){ thrust::default_random_engine rng(seed); thrust::uniform_real_distribution<> dist(0.0, 1.0); int device; cudaGetDevice(&device); rng.discard(idx + device * rows_per_gpu); T prob_threshold = (T) dist(rng); T prob_x = (( 2.0 * k * min_costs_ptr[idx]) / total_min_cost); return prob_x > prob_threshold; } ); log_debug(verbose, "KMeans|| - Potential centroids on GPU %d = %d.", i, pot_cent_num); if (pot_cent_num > 0) { copy_from_gpus.insert(i); // Copy all potential cluster centers thrust::device_vector<T> d_new_potential_centroids(pot_cent_num * cols); auto range = thrust::make_counting_iterator(0); thrust::copy_if( (*data[i]).begin(), (*data[i]).end(), range, d_new_potential_centroids.begin(), [=] __device__(int idx){ int row = idx / cols; thrust::default_random_engine rng(seed); thrust::uniform_real_distribution<> dist(0.0, 1.0); int device; cudaGetDevice(&device); rng.discard(row + device * rows_per_gpu); T prob_threshold = (T) dist(rng); T prob_x = (( 2.0 * k * min_costs_ptr[row]) / total_min_cost); return prob_x > prob_threshold; }); h_potential_centroids_per_gpu[i].clear(); h_potential_centroids_per_gpu[i].resize(d_new_potential_centroids.size()); new_potential_centroids += d_new_potential_centroids.size(); thrust::copy( d_new_potential_centroids.begin(), d_new_potential_centroids.end(), h_potential_centroids_per_gpu[i].begin() ); } } log_verbose(verbose, "KMeans|| - New potential centroids %d.", new_potential_centroids); // Gather 
potential cluster centers from all GPUs if (new_potential_centroids > 0) { h_potential_centroids.clear(); h_potential_centroids.resize(new_potential_centroids); int old_pot_centroids_size = h_all_potential_centroids.size(); h_all_potential_centroids.resize(old_pot_centroids_size + new_potential_centroids); int offset = 0; for (int i = 0; i < num_gpu; i++) { if(copy_from_gpus.find(i) != copy_from_gpus.end()) { thrust::copy( h_potential_centroids_per_gpu[i].begin(), h_potential_centroids_per_gpu[i].end(), h_potential_centroids.begin() + offset ); offset += h_potential_centroids_per_gpu[i].size(); } } thrust::copy( h_potential_centroids.begin(), h_potential_centroids.end(), h_all_potential_centroids.begin() + old_pot_centroids_size ); } } double timeloop = static_cast<double>(timer<double>() - t0); thrust::host_vector<T> final_centroids(0); int potential_centroids_num = h_all_potential_centroids.size() / cols; if (potential_centroids_num <= k) { final_centroids.resize(k * cols); thrust::copy( h_all_potential_centroids.begin(), h_all_potential_centroids.end(), final_centroids.begin() ); // TODO what if potential_centroids_num < k ?? we don't want 0s } else { // If we found more than k potential cluster centers we need to take only a subset // This is done using a weighted k-means++ method, since the set should be very small // it should converge very fast and is all done on the CPU. 
thrust::host_vector<T> weights(potential_centroids_num); double tc0 = timer<double>(); // Weights correspond to the number of data points assigned to each potential cluster center count_pts_per_centroid( verbose, num_gpu, rows_per_gpu, cols, data, data_dots, h_all_potential_centroids, weights ); double timecount = static_cast<double>(timer<double>() - tc0); double tkpp = timer<double>(); kmeans_plus_plus( verbose, seed, h_all_potential_centroids, weights, k, cols, final_centroids ); double timekpp = static_cast<double>(timer<double>() - tkpp); log_verbose(verbose, "KMeans|| - Time loop: %g Time count: %g Time kpp: %g.", timeloop, timecount, timekpp); } return final_centroids; } volatile std::atomic_int flaggpu(0); inline void my_function_gpu(int sig) { // can be called asynchronously fprintf(stderr, "Caught signal %d. Terminating shortly.\n", sig); flaggpu = 1; } std::vector<int> kmeans_init(int verbose, int *final_n_gpu, int n_gputry, int gpu_idtry, int rows) { if (rows > std::numeric_limits<int>::max()) { fprintf(stderr, "rows > %d not implemented\n", std::numeric_limits<int>::max()); fflush(stderr); exit(0); } std::signal(SIGINT, my_function_gpu); std::signal(SIGTERM, my_function_gpu); // no more gpus than visible gpus int n_gpuvis; cudaGetDeviceCount(&n_gpuvis); int n_gpu = std::min(n_gpuvis, n_gputry); // no more than rows n_gpu = std::min(n_gpu, rows); if (verbose) { std::cout << n_gpu << " gpus." 
<< std::endl; } int gpu_id = gpu_idtry % n_gpuvis; // setup GPU list to use std::vector<int> dList(n_gpu); for (int idx = 0; idx < n_gpu; idx++) { int device_idx = (gpu_id + idx) % n_gpuvis; dList[idx] = device_idx; } *final_n_gpu = n_gpu; return dList; } template<typename T> H2O4GPUKMeans<T>::H2O4GPUKMeans(const T *A, int k, int n, int d) { _A = A; _k = k; _n = n; _d = d; } template<typename T> int kmeans_fit(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, T threshold, const T *srcdata, void **pred_centroids, void **pred_labels) { // init random seed if use the C function rand() if (seed >= 0) { srand(seed); } else { srand(unsigned(time(NULL))); } // no more clusters than rows if (k > rows) { k = static_cast<int>(rows); fprintf(stderr, "Number of clusters adjusted to be equal to number of rows.\n"); fflush(stderr); } int n_gpu; std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows); double t0t = timer<double>(); thrust::device_vector<T> *data[n_gpu]; thrust::device_vector<int> *labels[n_gpu]; thrust::device_vector<T> *d_centroids[n_gpu]; thrust::device_vector<T> *data_dots[n_gpu]; for (int q = 0; q < n_gpu; q++) { CUDACHECK(cudaSetDevice(dList[q])); data[q] = new thrust::device_vector<T>(rows / n_gpu * cols); d_centroids[q] = new thrust::device_vector<T>(k * cols); data_dots[q] = new thrust::device_vector<T>(rows / n_gpu); kmeans::detail::labels_init(); } log_debug(verbose, "Number of points: %d", rows); log_debug(verbose, "Number of dimensions: %d", cols); log_debug(verbose, "Number of clusters: %d", k); log_debug(verbose, "Max. number of iterations: %d", max_iterations); log_debug(verbose, "Stopping threshold: %d", threshold); std::vector<int> v(rows); std::iota(std::begin(v), std::end(v), 0); // Fill with 0, 1, ..., rows. 
if (seed >= 0) { std::shuffle(v.begin(), v.end(), std::default_random_engine(seed)); } else { std::random_shuffle(v.begin(), v.end()); } // Copy the data to devices for (int q = 0; q < n_gpu; q++) { CUDACHECK(cudaSetDevice(dList[q])); if (verbose) { std::cout << "Copying data to device: " << dList[q] << std::endl; } copy_data(verbose, ord, *data[q], &srcdata[0], q, rows, rows / n_gpu, cols); // Pre-compute the data matrix norms kmeans::detail::make_self_dots(rows / n_gpu, cols, *data[q], *data_dots[q]); } // Get random points as centroids int bytecount = cols * k * sizeof(T); // all centroids if (0 == init_from_data) { log_debug(verbose, "KMeans - Using random initialization."); int masterq = 0; CUDACHECK(cudaSetDevice(dList[masterq])); copy_centroids_shuffled(verbose, v, ord, *d_centroids[masterq], &srcdata[0], rows, k, cols); // Copy centroids to all devices std::vector < cudaStream_t * > streams; streams.resize(n_gpu); for (int q = 0; q < n_gpu; q++) { if (q == masterq) continue; CUDACHECK(cudaSetDevice(dList[q])); if (verbose > 0) { std::cout << "Copying centroid data to device: " << dList[q] << std::endl; } streams[q] = reinterpret_cast<cudaStream_t *>(malloc(sizeof(cudaStream_t))); cudaStreamCreate(streams[q]); cudaMemcpyPeerAsync(thrust::raw_pointer_cast(&(*d_centroids[q])[0]), dList[q], thrust::raw_pointer_cast(&(*d_centroids[masterq])[0]), dList[masterq], bytecount, *(streams[q])); } for (int q = 0; q < n_gpu; q++) { if (q == masterq) continue; cudaSetDevice(dList[q]); cudaStreamDestroy(*(streams[q])); #if(DEBUGKMEANS) thrust::host_vector<T> h_centroidq=*d_centroids[q]; for(int ii=0;ii<k*d;ii++){ fprintf(stderr,"q=%d initcent[%d]=%g\n",q,ii,h_centroidq[ii]); fflush(stderr); } #endif } } else if (1 == init_from_data) { // kmeans|| log_debug(verbose, "KMeans - Using K-Means|| initialization."); thrust::host_vector<T> final_centroids = kmeans_parallel(verbose, seed, ord, data, data_dots, rows, cols, k, n_gpu, threshold); for (int q = 0; q < n_gpu; q++) { 
CUDACHECK(cudaSetDevice(dList[q])); cudaMemcpy( thrust::raw_pointer_cast(&(*d_centroids[q])[0]), thrust::raw_pointer_cast(&final_centroids[0]), bytecount, cudaMemcpyHostToDevice); } } for (int q = 0; q < n_gpu; q++) { CUDACHECK(cudaSetDevice(dList[q])); labels[q] = new thrust::device_vector<int>(rows / n_gpu); } double timetransfer = static_cast<double>(timer<double>() - t0t); double t0 = timer<double>(); int iter = kmeans::kmeans<T>(verbose, &flaggpu, rows, cols, k, data, labels, d_centroids, data_dots, dList, n_gpu, max_iterations, threshold, true); if (iter < 0) { log_error(verbose, "KMeans algorithm failed."); return iter; } double timefit = static_cast<double>(timer<double>() - t0); double t1 = timer<double>(); // copy result of centroids (sitting entirely on each device) back to host thrust::host_vector<T> *ctr = new thrust::host_vector<T>(*d_centroids[0]); // TODO FIXME: When do delete this ctr memory? // cudaMemcpy(ctr->data().get(), centroids[0]->data().get(), sizeof(T)*k*d, cudaMemcpyDeviceToHost); *pred_centroids = ctr->data(); // copy assigned labels thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(rows); int offset = 0; for (int q = 0; q < n_gpu; q++) { h_labels->insert(h_labels->begin() + offset, labels[q]->begin(), labels[q]->end()); offset += labels[q]->size(); } *pred_labels = h_labels->data(); // debug if (verbose >= H2O4GPU_LOG_VERBOSE) { for (unsigned int ii = 0; ii < k; ii++) { fprintf(stderr, "ii=%d of k=%d ", ii, k); for (unsigned int jj = 0; jj < cols; jj++) { fprintf(stderr, "%g ", (*ctr)[cols * ii + jj]); } fprintf(stderr, "\n"); fflush(stderr); } } for (int q = 0; q < n_gpu; q++) { CUDACHECK(cudaSetDevice(dList[q])); delete (data[q]); delete (labels[q]); delete (d_centroids[q]); delete (data_dots[q]); kmeans::detail::labels_close(); } double timecleanup = static_cast<double>(timer<double>() - t1); if (verbose) { std::cout << " Time fit: " << timefit << " s" << std::endl; fprintf(stderr, "Timetransfer: %g Timefit: %g 
Timecleanup: %g\n", timetransfer, timefit, timecleanup); fflush(stderr); } return 0; } template<typename T> int kmeans_predict(int verbose, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, const T *srcdata, const T *centroids, void **pred_labels) { // Print centroids if (verbose >= H2O4GPU_LOG_VERBOSE) { std::cout << std::endl; for (int i = 0; i < cols * k; i++) { std::cout << centroids[i] << " "; if (i % cols == 1) { std::cout << std::endl; } } } int n_gpu; std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows); thrust::device_vector<T> *d_data[n_gpu]; thrust::device_vector<T> *d_centroids[n_gpu]; thrust::device_vector<T> *data_dots[n_gpu]; thrust::device_vector<T> *centroid_dots[n_gpu]; thrust::host_vector<int> *h_labels = new thrust::host_vector<int>(0); for (int q = 0; q < n_gpu; q++) { CUDACHECK(cudaSetDevice(dList[q])); kmeans::detail::labels_init(); data_dots[q] = new thrust::device_vector<T>(rows / n_gpu); centroid_dots[q] = new thrust::device_vector<T>(k); d_centroids[q] = new thrust::device_vector<T>(k * cols); d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols); copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols); copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols); kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]); thrust::device_vector<int> d_labels(rows / n_gpu); kmeans::detail::batch_calculate_distances(verbose, q, rows / n_gpu, cols, k, *d_data[q], *d_centroids[q], *data_dots[q], *centroid_dots[q], [&](int n, size_t offset, thrust::device_vector<T> &pairwise_distances) { kmeans::detail::relabel(n, k, pairwise_distances, d_labels, offset); } ); h_labels->insert(h_labels->end(), d_labels.begin(), d_labels.end()); } *pred_labels = h_labels->data(); for (int q = 0; q < n_gpu; q++) { safe_cuda(cudaSetDevice(dList[q])); kmeans::detail::labels_close(); delete (data_dots[q]); delete (centroid_dots[q]); delete 
(d_centroids[q]); delete (d_data[q]); } return 0; } template<typename T> int kmeans_transform(int verbose, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, const T *srcdata, const T *centroids, void **preds) { // Print centroids if (verbose >= H2O4GPU_LOG_VERBOSE) { std::cout << std::endl; for (int i = 0; i < cols * k; i++) { std::cout << centroids[i] << " "; if (i % cols == 1) { std::cout << std::endl; } } } int n_gpu; std::vector<int> dList = kmeans_init(verbose, &n_gpu, n_gputry, gpu_idtry, rows); thrust::device_vector<T> *d_data[n_gpu]; thrust::device_vector<T> *d_centroids[n_gpu]; thrust::device_vector<T> *d_pairwise_distances[n_gpu]; thrust::device_vector<T> *data_dots[n_gpu]; thrust::device_vector<T> *centroid_dots[n_gpu]; for (int q = 0; q < n_gpu; q++) { CUDACHECK(cudaSetDevice(dList[q])); kmeans::detail::labels_init(); data_dots[q] = new thrust::device_vector<T>(rows / n_gpu); centroid_dots[q] = new thrust::device_vector<T>(k); d_pairwise_distances[q] = new thrust::device_vector<T>(rows / n_gpu * k); d_centroids[q] = new thrust::device_vector<T>(k * cols); d_data[q] = new thrust::device_vector<T>(rows / n_gpu * cols); copy_data(verbose, 'r', *d_centroids[q], &centroids[0], 0, k, k, cols); copy_data(verbose, ord, *d_data[q], &srcdata[0], q, rows, rows / n_gpu, cols); kmeans::detail::make_self_dots(rows / n_gpu, cols, *d_data[q], *data_dots[q]); // TODO batch this kmeans::detail::calculate_distances(verbose, q, rows / n_gpu, cols, k, *d_data[q], 0, *d_centroids[q], *data_dots[q], *centroid_dots[q], *d_pairwise_distances[q]); } // Move the resulting labels into host memory from all devices thrust::host_vector<T> *h_pairwise_distances = new thrust::host_vector<T>(0); for (int q = 0; q < n_gpu; q++) { h_pairwise_distances->insert(h_pairwise_distances->end(), d_pairwise_distances[q]->begin(), d_pairwise_distances[q]->end()); } *preds = h_pairwise_distances->data(); // Print centroids if (verbose >= H2O4GPU_LOG_VERBOSE) { std::cout 
<< std::endl; for (int i = 0; i < rows * cols; i++) { std::cout << h_pairwise_distances->data()[i] << " "; if (i % cols == 1) { std::cout << std::endl; } } } for (int q = 0; q < n_gpu; q++) { safe_cuda(cudaSetDevice(dList[q])); kmeans::detail::labels_close(); delete (d_pairwise_distances[q]); delete (data_dots[q]); delete (centroid_dots[q]); delete (d_centroids[q]); delete (d_data[q]); } return 0; } template<typename T> int makePtr_dense(int dopredict, int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, T threshold, const T *srcdata, const T *centroids, void **pred_centroids, void **pred_labels) { if (dopredict == 0) { return kmeans_fit(verbose, seed, gpu_idtry, n_gputry, rows, cols, ord, k, max_iterations, init_from_data, threshold, srcdata, pred_centroids, pred_labels); } else { return kmeans_predict(verbose, gpu_idtry, n_gputry, rows, cols, ord, k, srcdata, centroids, pred_labels); } } template int makePtr_dense<float>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, float threshold, const float *srcdata, const float *centroids, void **pred_centroids, void **pred_labels); template int makePtr_dense<double>(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, double threshold, const double *srcdata, const double *centroids, void **pred_centroids, void **pred_labels); template int kmeans_fit<float>(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int init_from_data, float threshold, const float *srcdata, void **pred_centroids, void **pred_labels); template int kmeans_fit<double>(int verbose, int seed, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, int max_iterations, int 
init_from_data, double threshold, const double *srcdata, void **pred_centroids, void **pred_labels); template int kmeans_predict<float>(int verbose, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, const float *srcdata, const float *centroids, void **pred_labels); template int kmeans_predict<double>(int verbose, int gpu_idtry, int n_gputry, size_t rows, size_t cols, const char ord, int k, const double *srcdata, const double *centroids, void **pred_labels); template int kmeans_transform<float>(int verbose, int gpu_id, int n_gpu, size_t m, size_t n, const char ord, int k, const float *src_data, const float *centroids, void **preds); template int kmeans_transform<double>(int verbose, int gpu_id, int n_gpu, size_t m, size_t n, const char ord, int k, const double *src_data, const double *centroids, void **preds); // Explicit template instantiation. #if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE == 1 template class H2O4GPUKMeans<double>; #endif #if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE == 1 template class H2O4GPUKMeans<float>; #endif } // namespace h2o4gpukmeans #ifdef __cplusplus extern "C" { #endif /* * Interface for other languages */ // Fit and Predict int make_ptr_float_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n, const char ord, int k, int max_iterations, int init_from_data, float threshold, const float *srcdata, const float *centroids, void **pred_centroids, void **pred_labels) { return h2o4gpukmeans::makePtr_dense<float>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k, max_iterations, init_from_data, threshold, srcdata, centroids, pred_centroids, pred_labels); } int make_ptr_double_kmeans(int dopredict, int verbose, int seed, int gpu_id, int n_gpu, size_t mTrain, size_t n, const char ord, int k, int max_iterations, int init_from_data, double threshold, const double *srcdata, const double *centroids, void **pred_centroids, void **pred_labels) { return 
h2o4gpukmeans::makePtr_dense<double>(dopredict, verbose, seed, gpu_id, n_gpu, mTrain, n, ord, k, max_iterations, init_from_data, threshold, srcdata, centroids, pred_centroids, pred_labels); } // Transform int kmeans_transform_float(int verbose, int gpu_id, int n_gpu, size_t m, size_t n, const char ord, int k, const float *src_data, const float *centroids, void **preds) { return h2o4gpukmeans::kmeans_transform<float>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds); } int kmeans_transform_double(int verbose, int gpu_id, int n_gpu, size_t m, size_t n, const char ord, int k, const double *src_data, const double *centroids, void **preds) { return h2o4gpukmeans::kmeans_transform<double>(verbose, gpu_id, n_gpu, m, n, ord, k, src_data, centroids, preds); } #ifdef __cplusplus } #endif
e804cb6fcd1dd21e3eb68acb26a544cbc5298c59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Based on https://github.com/daijifeng001/caffe-rfcn/blob/r-fcn/src/caffe/layers/psroi_pooling_layer.cu // // ------------------------------------------------------------------ // R-FCN // Copyright (c) 2016 Microsoft // Licensed under The MIT License [see r-fcn/LICENSE for details] // Written by Yi Li // ------------------------------------------------------------------ // // COPYRIGHT // // All contributions by the University of California: // Copyright (c) 2014, 2015, The Regents of the University of California // (Regents) // All rights reserved. // // All other contributions: // Copyright (c) 2014, 2015, the respective contributors // All rights reserved. // // Caffe uses a shared copyright model: each contributor holds copyright over // their contributions to Caffe. The project versioning records all such // contribution and copyright details. If a contributor wants to further mark // their specific copyright on a particular contribution, they should indicate // their copyright solely in the commit message of the change when it is // committed. // // LICENSE // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. 
Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // CONTRIBUTION AGREEMENT // // By contributing to the BVLC/caffe repository through pull-request, comment, // or otherwise, the contributor releases their content to the // license and copyright terms herein. 
#include <cfloat> #include "caffe2/core/context_gpu.h" #include "ps_roi_pool_op.h" namespace caffe2 { namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <typename T> __global__ void PSRoIPoolForward( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* bottom_rois, const int output_dim, const int group_size, T* top_data, int* mapping_channel) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; T roi_start_w = static_cast<T>( roundf(offset_bottom_rois[1])) * spatial_scale; T roi_start_h = static_cast<T>( roundf(offset_bottom_rois[2])) * spatial_scale; T roi_end_w = static_cast<T>( roundf(offset_bottom_rois[3]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>( roundf(offset_bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 T roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); // Add roi offsets and clip to input boundaries int hstart = floor( static_cast<T>(ph) * bin_size_h + roi_start_h); int wstart = floor( static_cast<T>(pw)* bin_size_w + roi_start_w); int hend = ceil( static_cast<T>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil( static_cast<T>(pw + 1) * bin_size_w + roi_start_w); hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop * group_size + gh) * group_size + gw; const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; T out_sum = 0; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; out_sum += offset_bottom_data[bottom_index]; } } T bin_area = (hend - hstart) * (wend - wstart); top_data[index] = is_empty ? 0. 
: out_sum / bin_area; mapping_channel[index] = c; } } template <typename T> __global__ void PSRoIPoolBackward( const int nthreads, const T* top_diff, const int* mapping_channel, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; T roi_start_w = static_cast<T>( roundf(offset_bottom_rois[1])) * spatial_scale; T roi_start_h = static_cast<T>( roundf(offset_bottom_rois[2])) * spatial_scale; T roi_end_w = static_cast<T>( roundf(offset_bottom_rois[3]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>( roundf(offset_bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 T roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor( static_cast<T>(ph)* bin_size_h + roi_start_h); int wstart = floor( static_cast<T>(pw)* bin_size_w + roi_start_w); int hend = ceil( static_cast<T>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil( static_cast<T>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; T bin_area = (hend - hstart) * (wend - wstart); T diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h * width + w; gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } } // namespace template<> bool PSRoIPoolOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Input data to pool auto& R = Input(1); // RoIs auto* Y = Output(0); // PSRoI pooled data auto* A = Output(1); // mapping_channel Y->Resize(R.dim32(0), output_dim_, pooled_height_, pooled_width_); A->Resize(Y->dims()); int output_size = Y->size(); hipLaunchKernelGGL(( PSRoIPoolForward<float>), dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), spatial_scale_, X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_, R.data<float>(), output_dim_, group_size_, Y->mutable_data<float>(), A->mutable_data<int>()); return true; } template<> bool PSRoIPoolGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Input data to pool auto& R = Input(1); // RoIs auto& A = Input(2); // mapping channels auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op // (aka "gradOutput") auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op // (aka "gradInput") dX->ResizeLike(X); // Must zero-out dX before accumulating gradients math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); hipLaunchKernelGGL(( PSRoIPoolBackward<float>), dim3(CAFFE_GET_BLOCKS(dY.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dY.size(), dY.data<float>(), A.data<int>(), R.dim32(0), spatial_scale_, X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_, output_dim_, dX->mutable_data<float>(), R.data<float>()); return true; } REGISTER_CUDA_OPERATOR(PSRoIPool, PSRoIPoolOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(PSRoIPoolGradient, PSRoIPoolGradientOp<float, CUDAContext>); } // namespace caffe2
e804cb6fcd1dd21e3eb68acb26a544cbc5298c59.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Based on https://github.com/daijifeng001/caffe-rfcn/blob/r-fcn/src/caffe/layers/psroi_pooling_layer.cu // // ------------------------------------------------------------------ // R-FCN // Copyright (c) 2016 Microsoft // Licensed under The MIT License [see r-fcn/LICENSE for details] // Written by Yi Li // ------------------------------------------------------------------ // // COPYRIGHT // // All contributions by the University of California: // Copyright (c) 2014, 2015, The Regents of the University of California // (Regents) // All rights reserved. // // All other contributions: // Copyright (c) 2014, 2015, the respective contributors // All rights reserved. // // Caffe uses a shared copyright model: each contributor holds copyright over // their contributions to Caffe. The project versioning records all such // contribution and copyright details. If a contributor wants to further mark // their specific copyright on a particular contribution, they should indicate // their copyright solely in the commit message of the change when it is // committed. // // LICENSE // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // CONTRIBUTION AGREEMENT // // By contributing to the BVLC/caffe repository through pull-request, comment, // or otherwise, the contributor releases their content to the // license and copyright terms herein. 
#include <cfloat> #include "caffe2/core/context_gpu.h" #include "ps_roi_pool_op.h" namespace caffe2 { namespace { template <typename T> inline __device__ T gpu_atomic_add(const T val, T* address); template <> inline __device__ float gpu_atomic_add(const float val, float* address) { return atomicAdd(address, val); } template <typename T> __global__ void PSRoIPoolForward( const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const T* bottom_rois, const int output_dim, const int group_size, T* top_data, int* mapping_channel) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; T roi_start_w = static_cast<T>( roundf(offset_bottom_rois[1])) * spatial_scale; T roi_start_h = static_cast<T>( roundf(offset_bottom_rois[2])) * spatial_scale; T roi_end_w = static_cast<T>( roundf(offset_bottom_rois[3]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>( roundf(offset_bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0 T roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); // Add roi offsets and clip to input boundaries int hstart = floor( static_cast<T>(ph) * bin_size_h + roi_start_h); int wstart = floor( static_cast<T>(pw)* bin_size_w + roi_start_w); int hend = ceil( static_cast<T>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil( static_cast<T>(pw + 1) * bin_size_w + roi_start_w); hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0),width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); int gw = pw; int gh = ph; int c = (ctop * group_size + gh) * group_size + gw; const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; T out_sum = 0; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h*width + w; out_sum += offset_bottom_data[bottom_index]; } } T bin_area = (hend - hstart) * (wend - wstart); top_data[index] = is_empty ? 0. 
: out_sum / bin_area; mapping_channel[index] = c; } } template <typename T> __global__ void PSRoIPoolBackward( const int nthreads, const T* top_diff, const int* mapping_channel, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; T roi_start_w = static_cast<T>( roundf(offset_bottom_rois[1])) * spatial_scale; T roi_start_h = static_cast<T>( roundf(offset_bottom_rois[2])) * spatial_scale; T roi_end_w = static_cast<T>( roundf(offset_bottom_rois[3]) + 1.) * spatial_scale; T roi_end_h = static_cast<T>( roundf(offset_bottom_rois[4]) + 1.) 
* spatial_scale; // Force too small ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0 T roi_height = max(roi_end_h - roi_start_h, 0.1); // Compute w and h at bottom T bin_size_h = roi_height / static_cast<T>(pooled_height); T bin_size_w = roi_width / static_cast<T>(pooled_width); int hstart = floor( static_cast<T>(ph)* bin_size_h + roi_start_h); int wstart = floor( static_cast<T>(pw)* bin_size_w + roi_start_w); int hend = ceil( static_cast<T>(ph + 1) * bin_size_h + roi_start_h); int wend = ceil( static_cast<T>(pw + 1) * bin_size_w + roi_start_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart, 0), height); hend = min(max(hend, 0), height); wstart = min(max(wstart, 0), width); wend = min(max(wend, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int c = mapping_channel[index]; T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; T bin_area = (hend - hstart) * (wend - wstart); T diff_val = is_empty ? 0. 
: top_diff[index] / bin_area; for (int h = hstart; h < hend; ++h){ for (int w = wstart; w < wend; ++w){ int bottom_index = h * width + w; gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index); } } } } } // namespace template<> bool PSRoIPoolOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Input data to pool auto& R = Input(1); // RoIs auto* Y = Output(0); // PSRoI pooled data auto* A = Output(1); // mapping_channel Y->Resize(R.dim32(0), output_dim_, pooled_height_, pooled_width_); A->Resize(Y->dims()); int output_size = Y->size(); PSRoIPoolForward<float><<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), spatial_scale_, X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_, R.data<float>(), output_dim_, group_size_, Y->mutable_data<float>(), A->mutable_data<int>()); return true; } template<> bool PSRoIPoolGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Input data to pool auto& R = Input(1); // RoIs auto& A = Input(2); // mapping channels auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op // (aka "gradOutput") auto* dX = Output(0); // Gradient of net w.r.t. input to "forward" op // (aka "gradInput") dX->ResizeLike(X); // Must zero-out dX before accumulating gradients math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); PSRoIPoolBackward<float><<<CAFFE_GET_BLOCKS(dY.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( dY.size(), dY.data<float>(), A.data<int>(), R.dim32(0), spatial_scale_, X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_, output_dim_, dX->mutable_data<float>(), R.data<float>()); return true; } REGISTER_CUDA_OPERATOR(PSRoIPool, PSRoIPoolOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(PSRoIPoolGradient, PSRoIPoolGradientOp<float, CUDAContext>); } // namespace caffe2
b61b9efb6d7dc4a20c77ff50d900f1dfe02d767a.hip
// !!! This is a file automatically generated by hipify!!! // nvcc 001 isamax .c -lcublas #include <iostream> #include </usr/include/stdio.h> #include </usr/include/stdlib.h> #include <hip/hip_runtime.h> #include <cuda_device_runtime_api.h> #include "rocblas.h" #include "hiprand/hiprand.h" #include "hip/hip_fp16.h" #include <math.h> #include <time.h> #include <hip/library_types.h> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include <hip/hip_runtime_api.h> #include <ctime> #include <unistd.h> #include <sys/time.h> #include "common.h" using namespace std; __global__ void convertFp32ToFp16 (half *out, float *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = (in[idx]); } } __global__ void convertFp16ToFp32 (float *out, half *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = (in[idx]); } } /* __global__ void convertFp32ToFp16 (__half *out, float *in, int rows, int cols) { for(int i = 0; i < rows; i++){ for(int j = 0; j < cols; j++){ out[i * cols + j] = __float2half(in[i * cols + j]); } } } */ void print_matrix(float *A, int nr_rows_A, int nr_cols_A) { for(int i = 0; i < nr_rows_A; i++){ for(int j = 0; j < nr_cols_A; j++){ std::cout << A[i * nr_cols_A + j] << " "; } std::cout << std::endl; } std::cout << std::endl; } // Fill the array with random numbers on GPU void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) { // Create a pseudo-random number generator hiprandGenerator_t prng; hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT); // Set the seed for the random number generator using the system clock hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock()); // Fill the array with random numbers on the device hiprandGenerateUniform(prng, A, nr_rows_A * nr_cols_A); } void gpu_blas_mmul(__half *A, __half *B, __half *C, int m, int k, int n) { __half alphah = __half(1.0f); __half betah = __half(0.0f); // Create a handle for CUBLAS hipblasHandle_t 
handle; hipblasStatus_t cublasStat = hipblasCreate(&handle); // Set the math mode to allow cuBLAS to use Tensor Cores: cublasStat = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); //cublasStat = cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH); //n maps to the output dimension. //m is the batch size * seq length. //k maps to the input dimension. //leading dimension of B will be cols in B(host) and it will be accessed as T. //leading dimension of A will be cols in A(host) and it will be accessed as N. //Leading dimension of C will be cols in C(host) and it will be accesses as N. //A is m * k in host k * m in device. //B is n * K in host k * n in device. //C is m * n in host n * m in device. //m will be rows A, C. //k will be cols A, B. //n will be rows B, cols in C. int lda = k, ldb = k, ldc = n; //--------------------------------------------------performing warmup runs------------------------------// for(int i = 0; i < 500; i++){ // Do the actual multiplication check_cuda_error(hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, k, &alphah, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, &betah, C, HIP_R_16F, ldc, HIP_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } //-------------------------------------------------running actual runs----------------------------------// hipDeviceSynchronize(); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, NULL); int niter = 10000; for(int i = 0; i < niter; i++){ // Do the actual multiplication // cublasStat = hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, k, alpha, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, beta, C, HIP_R_16F, ldc, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); check_cuda_error(hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, n, m, k, &alphah, B, HIP_R_16F, ldb, A, HIP_R_16F, lda, &betah, C, HIP_R_16F, ldc, HIP_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); //cout<<cublasStat<<endl; //cublasStat = hipblasGemmEx(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, 1024, 1536, 4096, alpha, B, HIP_R_16F, 
1024, A, HIP_R_16F, 4096, beta, C,HIP_R_16F,1024 ,HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); } hipEventRecord(stop, NULL); //stop event to complete hipEventSynchronize(stop); float msecTotal = 0.0f; hipEventElapsedTime(&msecTotal, start, stop); cout<<"Total time Taken: "<<msecTotal<<" msec"<<endl; // Compute and print the performance float msecPerMatrixMul = msecTotal/niter; cout<<"Average time taken per matmul: "<<msecPerMatrixMul<<" msec"<<endl; double flopsPerMatrixMul = 2.0 * (double) m * (double) n * (double) k; double teraFlops = (flopsPerMatrixMul * 1.0e-12f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f TFlop/s, Time= %.3f msec, Size= %.0f Ops\n", teraFlops, msecPerMatrixMul, flopsPerMatrixMul); /* for(int i = 0; i < 20; i++){ hipblasGemmStridedBatchedEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, HIP_R_16F, lda, 384 * 384, B, HIP_R_16F, ldb, 384 * 64, beta, C, HIP_R_16F, ldc, 384 * 64, 4, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP ); } */ // Destroy the handle hipblasDestroy(handle); } int main() { int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C; //A is for the Activations. has dimensions m * k, where m is (seq length * batchsize), k is no of inputs to the layer. //B is for the weights. stored as B' at host. has dimensions n * k. n is the number of outputs k is the not of inputs to the layer. //C is the output matrix. has dimensions m * n. //Matmul will be A B'. //set dims according to operation c = a * b' nr_rows_A = 4096; nr_cols_A = 4096; nr_rows_B = 4096; nr_cols_B = 4096; nr_rows_C = 4096; nr_cols_C = 4096; // Allocate 6 arrays on GPU. // array on device of type half. // float because hiprand generates only fp32 numbers. // __half arrays for fp16 numbers. 
float *df_A, *df_B, *df_C; __half *d_A, *d_B, *d_C; check_cuda_error(hipMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(__half))); check_cuda_error(hipMalloc(&df_A,nr_rows_A * nr_cols_A * sizeof(float))); GPU_fill_rand(df_A, nr_rows_A, nr_cols_A); hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((nr_rows_A * nr_cols_A+ 255) / 256), dim3(256) , 0, 0, d_A, df_A, nr_rows_A * nr_cols_A); check_cuda_error(hipMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(__half))); check_cuda_error(hipMalloc(&df_B,nr_rows_B * nr_cols_B * sizeof(float))); GPU_fill_rand(df_B, nr_rows_B, nr_cols_B); hipLaunchKernelGGL(( convertFp32ToFp16) , dim3((nr_rows_B * nr_cols_B + 255) / 256), dim3(256) , 0, 0, d_B, df_B, nr_rows_B * nr_cols_B); check_cuda_error(hipMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(__half))); check_cuda_error(hipMalloc(&df_C,nr_rows_C * nr_cols_C * sizeof(float))); //m will be rows a. //k will be cols a. //n will be rows b. //call the matmul function. gpu_blas_mmul(d_A, d_B, d_C, nr_rows_A, nr_cols_A, nr_rows_B); hipFree(d_A); hipFree(d_B); hipFree(d_C); hipFree(df_A); hipFree(df_B); hipFree(df_C); return 0; }
b61b9efb6d7dc4a20c77ff50d900f1dfe02d767a.cu
// nvcc 001 isamax .c -lcublas #include <iostream> #include </usr/include/stdio.h> #include </usr/include/stdlib.h> #include <cuda_runtime.h> #include <cuda_device_runtime_api.h> #include "cublas_v2.h" #include "curand.h" #include "cuda_fp16.h" #include <math.h> #include <time.h> #include <library_types.h> #include <cuda.h> #include "device_launch_parameters.h" #include <cuda_profiler_api.h> #include <ctime> #include <unistd.h> #include <sys/time.h> #include "common.h" using namespace std; __global__ void convertFp32ToFp16 (half *out, float *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = (in[idx]); } } __global__ void convertFp16ToFp32 (float *out, half *in, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < n) { out[idx] = (in[idx]); } } /* __global__ void convertFp32ToFp16 (__half *out, float *in, int rows, int cols) { for(int i = 0; i < rows; i++){ for(int j = 0; j < cols; j++){ out[i * cols + j] = __float2half(in[i * cols + j]); } } } */ void print_matrix(float *A, int nr_rows_A, int nr_cols_A) { for(int i = 0; i < nr_rows_A; i++){ for(int j = 0; j < nr_cols_A; j++){ std::cout << A[i * nr_cols_A + j] << " "; } std::cout << std::endl; } std::cout << std::endl; } // Fill the array with random numbers on GPU void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) { // Create a pseudo-random number generator curandGenerator_t prng; curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT); // Set the seed for the random number generator using the system clock curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock()); // Fill the array with random numbers on the device curandGenerateUniform(prng, A, nr_rows_A * nr_cols_A); } void gpu_blas_mmul(__half *A, __half *B, __half *C, int m, int k, int n) { __half alphah = __half(1.0f); __half betah = __half(0.0f); // Create a handle for CUBLAS cublasHandle_t handle; cublasStatus_t cublasStat = cublasCreate(&handle); // Set the math mode to allow cuBLAS 
to use Tensor Cores: cublasStat = cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH); //cublasStat = cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH); //n maps to the output dimension. //m is the batch size * seq length. //k maps to the input dimension. //leading dimension of B will be cols in B(host) and it will be accessed as T. //leading dimension of A will be cols in A(host) and it will be accessed as N. //Leading dimension of C will be cols in C(host) and it will be accesses as N. //A is m * k in host k * m in device. //B is n * K in host k * n in device. //C is m * n in host n * m in device. //m will be rows A, C. //k will be cols A, B. //n will be rows B, cols in C. int lda = k, ldb = k, ldc = n; //--------------------------------------------------performing warmup runs------------------------------// for(int i = 0; i < 500; i++){ // Do the actual multiplication check_cuda_error(cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, k, &alphah, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &betah, C, CUDA_R_16F, ldc, CUDA_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); } //-------------------------------------------------running actual runs----------------------------------// cudaDeviceSynchronize(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, NULL); int niter = 10000; for(int i = 0; i < niter; i++){ // Do the actual multiplication // cublasStat = cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, k, alpha, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, beta, C, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); check_cuda_error(cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, k, &alphah, B, CUDA_R_16F, ldb, A, CUDA_R_16F, lda, &betah, C, CUDA_R_16F, ldc, CUDA_R_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); //cout<<cublasStat<<endl; //cublasStat = cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N, 1024, 1536, 4096, alpha, B, CUDA_R_16F, 1024, A, CUDA_R_16F, 4096, beta, C,CUDA_R_16F,1024 ,CUDA_R_32F, 
CUBLAS_GEMM_DEFAULT_TENSOR_OP); } cudaEventRecord(stop, NULL); //stop event to complete cudaEventSynchronize(stop); float msecTotal = 0.0f; cudaEventElapsedTime(&msecTotal, start, stop); cout<<"Total time Taken: "<<msecTotal<<" msec"<<endl; // Compute and print the performance float msecPerMatrixMul = msecTotal/niter; cout<<"Average time taken per matmul: "<<msecPerMatrixMul<<" msec"<<endl; double flopsPerMatrixMul = 2.0 * (double) m * (double) n * (double) k; double teraFlops = (flopsPerMatrixMul * 1.0e-12f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f TFlop/s, Time= %.3f msec, Size= %.0f Ops\n", teraFlops, msecPerMatrixMul, flopsPerMatrixMul); /* for(int i = 0; i < 20; i++){ cublasGemmStridedBatchedEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, CUDA_R_16F, lda, 384 * 384, B, CUDA_R_16F, ldb, 384 * 64, beta, C, CUDA_R_16F, ldc, 384 * 64, 4, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP ); } */ // Destroy the handle cublasDestroy(handle); } int main() { int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C; //A is for the Activations. has dimensions m * k, where m is (seq length * batchsize), k is no of inputs to the layer. //B is for the weights. stored as B' at host. has dimensions n * k. n is the number of outputs k is the not of inputs to the layer. //C is the output matrix. has dimensions m * n. //Matmul will be A B'. //set dims according to operation c = a * b' nr_rows_A = 4096; nr_cols_A = 4096; nr_rows_B = 4096; nr_cols_B = 4096; nr_rows_C = 4096; nr_cols_C = 4096; // Allocate 6 arrays on GPU. // array on device of type half. // float because curand generates only fp32 numbers. // __half arrays for fp16 numbers. 
float *df_A, *df_B, *df_C; __half *d_A, *d_B, *d_C; check_cuda_error(cudaMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(__half))); check_cuda_error(cudaMalloc(&df_A,nr_rows_A * nr_cols_A * sizeof(float))); GPU_fill_rand(df_A, nr_rows_A, nr_cols_A); convertFp32ToFp16 <<< (nr_rows_A * nr_cols_A+ 255) / 256, 256 >>> (d_A, df_A, nr_rows_A * nr_cols_A); check_cuda_error(cudaMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(__half))); check_cuda_error(cudaMalloc(&df_B,nr_rows_B * nr_cols_B * sizeof(float))); GPU_fill_rand(df_B, nr_rows_B, nr_cols_B); convertFp32ToFp16 <<< (nr_rows_B * nr_cols_B + 255) / 256, 256 >>> (d_B, df_B, nr_rows_B * nr_cols_B); check_cuda_error(cudaMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(__half))); check_cuda_error(cudaMalloc(&df_C,nr_rows_C * nr_cols_C * sizeof(float))); //m will be rows a. //k will be cols a. //n will be rows b. //call the matmul function. gpu_blas_mmul(d_A, d_B, d_C, nr_rows_A, nr_cols_A, nr_rows_B); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaFree(df_A); cudaFree(df_B); cudaFree(df_C); return 0; }
96d2033061edd42d337f754a1b7c66cfb9c31271.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> #define THREADNO 16 // . (0,0) #define THREADNO 1 #define THREADIDX_Y threadIdx.x #define THREADIDX_X threadIdx.y /** @brief @param _input @param _width @param _height **/ int* calculateSquareSumTableCuda(unsigned char* _input, int _width, int _height) { int size = (_width + 1) * (_height + 1) * sizeof(int); int* result = (int*) malloc( size ); int* origResult = result; memset(result, 0, size); _width += 1; result += _width; int* origResult2 = result; int* current = result; unsigned char* currentInput = _input; for( int i = 0; i < _height; ++i ) { ++current; for ( int j = 0; j < _width - 1; ++j ) { *current = (*currentInput) * (*currentInput) + *(current - 1) - *(current - _width - 1) + *(current - _width); ++current; ++currentInput; } } return origResult; } /** @brief @param _input @param _width @param _height @param _sx X @param _sy Y @param _sw @param _sh **/ __device__ int getSumFromTable(int* _input, int _width, int _height, int _sx, int _sy, int _sw, int _sh) { _width += 1; int* corner = _input + (_width * (_sy)) + _sx; return *corner - *(corner + _sw) - *(corner + _sh * _width) + *(corner + _sh * _width + _sw); } /** @brief @param _input @param _width @param _height @param _pattern @param _ptnWidth @param _ptnHeight @param _output **/ __global__ void doMatching(unsigned char* _input, int _width, int _height, unsigned char* _pattern, int _ptnWidth, int _ptnHeight, unsigned char* _output, int * squareSumTable) { // , int blockHeight = _height / THREADNO; int blockWidth = _width / THREADNO; int startY = blockHeight * THREADIDX_Y; int startX = blockWidth * THREADIDX_X; int endY = min(_height - _ptnHeight + 1, startY + blockHeight); int endX = min(_width - _ptnWidth + 1, startX + blockWidth); // // unsigned char* input = _input; // unsigned 
char* input2 = _pattern; int WIDTH = _width; int HEIGHT = _height; int PTNWIDTH = _ptnWidth; int PTNHEIGHT = _ptnHeight; // unsigned char* inputCurrent = input; unsigned char* ptnCurrent = input2; for(int py = startY; py < endY; ++py) { unsigned char* curInputRow = _input + _width * py; unsigned char* curOutputRow = _output + _width * py; for(int px = startX; px < endX; ++px) { // double nom = 0; // double denom = sqrt( (double)getSumFromTable(squareSumTable, WIDTH, HEIGHT, px, py, PTNWIDTH, PTNHEIGHT) ) ; unsigned char* curPattern = _pattern; for(int y = 0; y < _ptnHeight; ++y) { unsigned char* ptCurInputRow = curInputRow + _width * y; for(int x = 0; x < _ptnWidth; ++x) { int val = (*ptCurInputRow) - (*curPattern); nom += val * val; ++ptCurInputRow; ++curPattern; } } // double val = 1 - sqrt(nom) / denom; // *curOutputRow = 255.0 * val; ++curInputRow; ++curOutputRow; } } } /** @brief @param argc . CUDA @param argv . CUDA @param _input @param _width @param _height @param _pattern @param _ptnWidth @param _ptnHeight **/ extern "C" unsigned char* simpleMatchingCuda( int argc, char** argv, unsigned char* _input, int _width, int _height, unsigned char* _pattern, int _ptnWidth, int _ptnHeight) { CUT_DEVICE_INIT(argc, argv); unsigned int timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); CUT_SAFE_CALL( cutStartTimer( timer)); // allocate device memory int imsize = _width * _height; int ptnsize = _ptnWidth * _ptnHeight; unsigned char* fImage = new unsigned char[imsize]; unsigned char* fResult = new unsigned char[imsize]; //memset( fImage, 0, imsize ); memcpy( fImage, _input, imsize ); unsigned char* d_pdata = NULL; CUDA_SAFE_CALL( hipMalloc( (void**) &d_pdata, ptnsize)); // copy host memory to device CUDA_SAFE_CALL( hipMemcpy( d_pdata, _pattern, ptnsize, hipMemcpyHostToDevice) ); unsigned char* d_idata = NULL; CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, imsize * sizeof(unsigned char) )); unsigned char* d_resdata = NULL; CUDA_SAFE_CALL( hipMalloc( (void**) &d_resdata, 
imsize * sizeof(unsigned char) )); hipMemset(d_resdata, 255, imsize * sizeof(unsigned char)); // copy host memory to device CUDA_SAFE_CALL( hipMemcpy( d_idata, fImage, imsize * sizeof(unsigned char), hipMemcpyHostToDevice) ); // int* squareSumTable = calculateSquareSumTableCuda(_input, _width, _height); int* d_sumdata = NULL; CUDA_SAFE_CALL( hipMalloc( (void**) &d_sumdata, imsize * sizeof(int) )); // copy host memory to device CUDA_SAFE_CALL( hipMemcpy( d_sumdata, squareSumTable, imsize * sizeof(int), hipMemcpyHostToDevice) ); // execute the kernel // setup execution parameters dim3 threads( THREADNO, THREADNO, 1 ); hipLaunchKernelGGL(( doMatching), dim3(1), dim3(threads), 0, 0, d_idata, _width, _height, d_pdata, _ptnWidth, _ptnHeight, d_resdata, d_sumdata); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL( hipMemcpy( fImage, d_resdata, imsize, hipMemcpyDeviceToHost) ); CUT_CHECK_ERROR("Output copy failed"); CUT_SAFE_CALL( cutStopTimer( timer)); printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer)); CUT_SAFE_CALL( cutDeleteTimer( timer)); return fImage; }
96d2033061edd42d337f754a1b7c66cfb9c31271.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> #define THREADNO 16 // эмуляция многоядерности куды на цпу. используется один поток с идентификатором (0,0) #define THREADNO 1 #define THREADIDX_Y threadIdx.x #define THREADIDX_X threadIdx.y /** @brief Расчитать таблицу квадратичной суммации @param _input Входное построчное изображение @param _width Ширина @param _height Высота **/ int* calculateSquareSumTableCuda(unsigned char* _input, int _width, int _height) { int size = (_width + 1) * (_height + 1) * sizeof(int); int* result = (int*) malloc( size ); int* origResult = result; memset(result, 0, size); _width += 1; result += _width; int* origResult2 = result; int* current = result; unsigned char* currentInput = _input; for( int i = 0; i < _height; ++i ) { ++current; for ( int j = 0; j < _width - 1; ++j ) { *current = (*currentInput) * (*currentInput) + *(current - 1) - *(current - _width - 1) + *(current - _width); ++current; ++currentInput; } } return origResult; } /** @brief Получить значение суммы области из просчитанной таблицы суммации @param _input Построчное входное изображение @param _width Ширина изображения @param _height Высота изображения @param _sx Координата X левого верхнего угла области @param _sy Координата Y левого верхнего угла области @param _sw Ширина области @param _sh Высота области **/ __device__ int getSumFromTable(int* _input, int _width, int _height, int _sx, int _sy, int _sw, int _sh) { _width += 1; int* corner = _input + (_width * (_sy)) + _sx; return *corner - *(corner + _sw) - *(corner + _sh * _width) + *(corner + _sh * _width + _sw); } /** @brief Интерфейс к методу корреляции @param _input Построчное входное изображение оригинала @param _width Ширина изображения @param _height Высота изображения @param _pattern Построчное входное изображение шаблона @param _ptnWidth Ширина изображения шаблона @param _ptnHeight Высота изображения шаблона @param 
_output Результат **/ __global__ void doMatching(unsigned char* _input, int _width, int _height, unsigned char* _pattern, int _ptnWidth, int _ptnHeight, unsigned char* _output, int * squareSumTable) { // нижеприведенный блок будет использоваться в кудовском коде, здесь для сравнения дан просто int blockHeight = _height / THREADNO; int blockWidth = _width / THREADNO; int startY = blockHeight * THREADIDX_Y; int startX = blockWidth * THREADIDX_X; int endY = min(_height - _ptnHeight + 1, startY + blockHeight); int endX = min(_width - _ptnWidth + 1, startX + blockWidth); // конец блока // входное изображение unsigned char* input = _input; // паттерн unsigned char* input2 = _pattern; int WIDTH = _width; int HEIGHT = _height; int PTNWIDTH = _ptnWidth; int PTNHEIGHT = _ptnHeight; // стартовые указатели для оригинала и шаблона unsigned char* inputCurrent = input; unsigned char* ptnCurrent = input2; for(int py = startY; py < endY; ++py) { unsigned char* curInputRow = _input + _width * py; unsigned char* curOutputRow = _output + _width * py; for(int px = startX; px < endX; ++px) { // расчет числителя как суммы квадратов отклонений double nom = 0; // расчет знаменателя как суммы квадратов области оригинала double denom = sqrt( (double)getSumFromTable(squareSumTable, WIDTH, HEIGHT, px, py, PTNWIDTH, PTNHEIGHT) ) ; unsigned char* curPattern = _pattern; for(int y = 0; y < _ptnHeight; ++y) { unsigned char* ptCurInputRow = curInputRow + _width * y; for(int x = 0; x < _ptnWidth; ++x) { int val = (*ptCurInputRow) - (*curPattern); nom += val * val; ++ptCurInputRow; ++curPattern; } } // коэффициент корреляции double val = 1 - sqrt(nom) / denom; // форматирование для вывода *curOutputRow = 255.0 * val; ++curInputRow; ++curOutputRow; } } } /** @brief Интерфейс к методу корреляции @param argc Не используется. Для совместимости с CUDA @param argv Не используется. 
Для совместимости с CUDA @param _input Построчное входное изображение оригинала @param _width Ширина изображения @param _height Высота изображения @param _pattern Построчное входное изображение шаблона @param _ptnWidth Ширина изображения шаблона @param _ptnHeight Высота изображения шаблона **/ extern "C" unsigned char* simpleMatchingCuda( int argc, char** argv, unsigned char* _input, int _width, int _height, unsigned char* _pattern, int _ptnWidth, int _ptnHeight) { CUT_DEVICE_INIT(argc, argv); unsigned int timer = 0; CUT_SAFE_CALL( cutCreateTimer( &timer)); CUT_SAFE_CALL( cutStartTimer( timer)); // allocate device memory int imsize = _width * _height; int ptnsize = _ptnWidth * _ptnHeight; unsigned char* fImage = new unsigned char[imsize]; unsigned char* fResult = new unsigned char[imsize]; //memset( fImage, 0, imsize ); memcpy( fImage, _input, imsize ); unsigned char* d_pdata = NULL; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_pdata, ptnsize)); // copy host memory to device CUDA_SAFE_CALL( cudaMemcpy( d_pdata, _pattern, ptnsize, cudaMemcpyHostToDevice) ); unsigned char* d_idata = NULL; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, imsize * sizeof(unsigned char) )); unsigned char* d_resdata = NULL; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_resdata, imsize * sizeof(unsigned char) )); cudaMemset(d_resdata, 255, imsize * sizeof(unsigned char)); // copy host memory to device CUDA_SAFE_CALL( cudaMemcpy( d_idata, fImage, imsize * sizeof(unsigned char), cudaMemcpyHostToDevice) ); // таблица суммации квадратичная int* squareSumTable = calculateSquareSumTableCuda(_input, _width, _height); int* d_sumdata = NULL; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_sumdata, imsize * sizeof(int) )); // copy host memory to device CUDA_SAFE_CALL( cudaMemcpy( d_sumdata, squareSumTable, imsize * sizeof(int), cudaMemcpyHostToDevice) ); // execute the kernel // setup execution parameters dim3 threads( THREADNO, THREADNO, 1 ); doMatching<<< 1, threads>>>(d_idata, _width, _height, d_pdata, _ptnWidth, 
_ptnHeight, d_resdata, d_sumdata); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL( cudaMemcpy( fImage, d_resdata, imsize, cudaMemcpyDeviceToHost) ); CUT_CHECK_ERROR("Output copy failed"); CUT_SAFE_CALL( cutStopTimer( timer)); printf( "Processing time: %f (ms)\n", cutGetTimerValue( timer)); CUT_SAFE_CALL( cutDeleteTimer( timer)); return fImage; }
e7a20db0ee20db8d69885c66b27dffc387414785.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2012 Jeffrey Blanchard, Erik Opavsky, and Emircan Uysaler * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hip/hip_runtime_api.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <sys/time.h> #include <algorithm> //Include various thrust items that are used #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/scan.h> #include <thrust/extrema.h> #include <thrust/pair.h> #include <thrust/transform_reduce.h> #include <thrust/random.h> //various functions, include the functions //that print numbers in binary. #include "printFunctions.cu" //the algorithms #include "bucketMultiselect.cu" //#include "naiveBucketMultiselect.cu" #include "generateProblems.cu" #include "multiselectTimingFunctions.cu" #define NUMBEROFALGORITHMS 2 char* namesOfMultiselectTimingFunctions[NUMBEROFALGORITHMS] = {"Sort and Choose Multiselect", "Bucket Multiselect"}; using namespace std; namespace CompareMultiselect { /* This function compares bucketMultiselect with the other algorithms given in the defined range of kVals and array size. 
*/ template<typename T> void compareMultiselectAlgorithms(uint size, uint* kVals, uint numKs, uint numTests , uint *algorithmsToTest, uint generateType, uint kGenerateType, char* fileNamecsv , T* data = NULL) { // allocate space for operations T *h_vec, *h_vec_copy; float timeArray[NUMBEROFALGORITHMS][numTests]; T * resultsArray[NUMBEROFALGORITHMS][numTests]; float totalTimesPerAlgorithm[NUMBEROFALGORITHMS]; uint winnerArray[numTests]; uint timesWon[NUMBEROFALGORITHMS]; uint i,j,m,x; int runOrder[NUMBEROFALGORITHMS]; unsigned long long seed; //, seed2; results_t<T> *temp; ofstream fileCsv; timeval t1; //, t2; typedef results_t<T>* (*ptrToTimingFunction)(T*, uint, uint *, uint); typedef void (*ptrToGeneratingFunction)(T*, uint, hiprandGenerator_t); //these are the functions that can be called ptrToTimingFunction arrayOfTimingFunctions[NUMBEROFALGORITHMS] = {&timeSortAndChooseMultiselect<T>, &timeBucketMultiselect<T>}; ptrToGeneratingFunction *arrayOfGenerators; char** namesOfGeneratingFunctions; // this is the array of names of functions that generate problems of this type, // ie float, double, or uint namesOfGeneratingFunctions = returnNamesOfGenerators<T>(); arrayOfGenerators = (ptrToGeneratingFunction *) returnGenFunctions<T>(); printf("Files will be written to %s\n", fileNamecsv); fileCsv.open(fileNamecsv, ios_base::app); //zero out the totals and times won bzero(totalTimesPerAlgorithm, NUMBEROFALGORITHMS * sizeof(uint)); bzero(timesWon, NUMBEROFALGORITHMS * sizeof(uint)); //allocate space for h_vec, and h_vec_copy h_vec = (T *) malloc(size * sizeof(T)); h_vec_copy = (T *) malloc(size * sizeof(T)); //create the random generators. 
hiprandGenerator_t generator; srand(unsigned(time(NULL))); printf("The distribution is: %s\n", namesOfGeneratingFunctions[generateType]); printf("The k distribution is: %s\n", namesOfKGenerators[kGenerateType]); /***********************************************/ /*********** START RUNNING TESTS ************ /***********************************************/ for(i = 0; i < numTests; i++) { //hipDeviceReset(); gettimeofday(&t1, NULL); seed = t1.tv_usec * t1.tv_sec; for(m = 0; m < NUMBEROFALGORITHMS;m++) runOrder[m] = m; std::random_shuffle(runOrder, runOrder + NUMBEROFALGORITHMS); fileCsv << size << "," << numKs << "," << namesOfGeneratingFunctions[generateType] << "," << namesOfKGenerators[kGenerateType] << ","; hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator,seed); printf("Running test %u of %u for size: %u and numK: %u\n", i + 1, numTests, size, numKs); //generate the random vector using the specified distribution if(data == NULL) arrayOfGenerators[generateType](h_vec, size, generator); else h_vec = data; //copy the vector to h_vec_copy, which will be used to restore it later memcpy(h_vec_copy, h_vec, size * sizeof(T)); /* *************************************************** ****** In this file, the kDistribution is always set to UNIFORM (kGenerateType = 1) ****** so this regeneration of the order statistics is not needed. ****** It is saved here in case one wants to run these tests for a different kDistribution *************************************************** // if the kdistribution is random, we need to generate new a kList for each new random problem instance. 
if ( (kGenerateType != 1) && (i>0) ){ gettimeofday(&t2, NULL); seed2 = t2.tv_usec * t2.tv_sec; hiprandGenerator_t generator2; srand(unsigned(time(NULL))); hiprandCreateGenerator(&generator2, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator2,seed2); arrayOfKDistributionGenerators[kGenerateType](kVals, numKs, size, generator2); } */ winnerArray[i] = 0; float currentWinningTime = INFINITY; //run the various timing functions for(x = 0; x < NUMBEROFALGORITHMS; x++){ j = runOrder[x]; if(algorithmsToTest[j]){ //run timing function j printf("TESTING: %u\n", j); temp = arrayOfTimingFunctions[j](h_vec_copy, size, kVals, numKs); //record the time result timeArray[j][i] = temp->time; //record the value returned resultsArray[j][i] = temp->vals; //update the current "winner" if necessary if(timeArray[j][i] < currentWinningTime){ currentWinningTime = temp->time; winnerArray[i] = j; } //perform clean up free(temp); memcpy(h_vec_copy, h_vec, size * sizeof(T)); } } hiprandDestroyGenerator(generator); for(x = 0; x < NUMBEROFALGORITHMS; x++) if(algorithmsToTest[x]) fileCsv << namesOfMultiselectTimingFunctions[x] << "," << timeArray[x][i] << ","; // check for errors, and output information to recreate problem uint flag = 0; for(m = 1; m < NUMBEROFALGORITHMS;m++) if(algorithmsToTest[m]) for (j = 0; j < numKs; j++) { if(resultsArray[m][i][j] != resultsArray[0][i][j]) { flag++; fileCsv << "\nERROR ON TEST " << i << " of " << numTests << " tests!!!!!\n"; fileCsv << "vector size = " << size << "\nvector seed = " << seed << "\n"; fileCsv << "numKs = " << numKs << "\n"; fileCsv << "wrong k = " << kVals[j] << " kIndex = " << j << " wrong result = " << resultsArray[m][i][j] << " correct result = " << resultsArray[0][i][j] << "\n"; std::cout <<namesOfMultiselectTimingFunctions[m] << " did not return the correct answer on test " << i + 1 << " at k[" << j << "]. 
It got "<< resultsArray[m][i][j]; std::cout << " instead of " << resultsArray[0][i][j] << ".\n" ; std::cout << "RESULT:\t"; PrintFunctions::printBinary(resultsArray[m][i][j]); std::cout << "Right:\t"; PrintFunctions::printBinary(resultsArray[0][i][j]); } } fileCsv << flag << "\n"; } //calculate the total time each algorithm took for(i = 0; i < numTests; i++) for(j = 0; j < NUMBEROFALGORITHMS;j++) if(algorithmsToTest[j]) totalTimesPerAlgorithm[j] += timeArray[j][i]; //count the number of times each algorithm won. for(i = 0; i < numTests;i++) timesWon[winnerArray[i]]++; printf("\n\n"); //print out the average times for(i = 0; i < NUMBEROFALGORITHMS; i++) if(algorithmsToTest[i]) printf("%-20s averaged: %f ms\n", namesOfMultiselectTimingFunctions[i], totalTimesPerAlgorithm[i] / numTests); for(i = 0; i < NUMBEROFALGORITHMS; i++) if(algorithmsToTest[i]) printf("%s won %u times\n", namesOfMultiselectTimingFunctions[i], timesWon[i]); // free results for(i = 0; i < numTests; i++) for(m = 0; m < NUMBEROFALGORITHMS; m++) if(algorithmsToTest[m]) free(resultsArray[m][i]); //free h_vec and h_vec_copy if(data == NULL) free(h_vec); free(h_vec_copy); //close the file fileCsv.close(); } /* This function generates the array of kVals to work on and acts as a wrapper for comparison. */ template<typename T> void runTests (uint generateType, char* fileName, uint startPower, uint stopPower , uint timesToTestEachK, uint kDistribution, uint startK, uint stopK, uint kJump) { uint algorithmsToRun[NUMBEROFALGORITHMS]= {1, 1}; uint size; uint i; uint arrayOfKs[stopK+1]; /* ***************************** **** In this file, the kDistribution is not random. Thus the generator **** is only needed to call the function. It's value is irrelevant. 
***************************** */ unsigned long long seed; timeval t1; gettimeofday(&t1, NULL); seed = t1.tv_usec * t1.tv_sec; hiprandGenerator_t generator; srand(unsigned(time(NULL))); hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator,seed); // double the array size to the next powers of 2 for(size = (1 << startPower); size <= (1 << stopPower); size *= 2) { for(i = startK; i <= stopK; i+=kJump) { hipDeviceReset(); hipDeviceReset(); arrayOfKDistributionGenerators[kDistribution](arrayOfKs, i, size, generator); compareMultiselectAlgorithms<T>(size, arrayOfKs, i, timesToTestEachK, algorithmsToRun, generateType, kDistribution, fileName); } // end for(i=starK) hiprandDestroyGenerator(generator); } // end for(size) } // end runTests } // end namespace CompareMultiselect int main (int argc, char *argv[]) { using namespace CompareMultiselect; char *fileName, *hostName, *typeString; fileName = (char*) malloc(128 * sizeof(char)); typeString = (char*) malloc(10 * sizeof(char)); hostName = (char*) malloc(20 * sizeof(char)); gethostname(hostName, 20); time_t rawtime; struct tm * timeinfo; time ( &rawtime ); timeinfo = localtime ( &rawtime ); char * humanTime = asctime(timeinfo); humanTime[strlen(humanTime)-1] = '\0'; uint testCount, type,distributionType,startPower,stopPower,kDistribution,startK ,stopK,jumpK; uint vecDistr[4]; vecDistr[0]=0; vecDistr[1]=1; vecDistr[2]=3; vecDistr[3]=9; kDistribution=1; startPower=26; stopPower=26; startK=100; jumpK=10; stopK=500; testCount=25; for(int j=0; j<4; j++){ distributionType = vecDistr[j]; for(type=0; type<2; type++){ switch(type){ case 0: typeString = "float"; snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); 
runTests<float>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); break; case 1: typeString = "double"; if (distributionType<2){ snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); runTests<double>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); } // end if(distributionType) break; case 2: typeString = "uint"; runTests<uint>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); break; default: printf("You entered and invalid option, now exiting\n"); break; } // end switch(type) } // end for(type) } // end for (int j) free (fileName); return 0; }
e7a20db0ee20db8d69885c66b27dffc387414785.cu
/* Copyright 2012 Jeffrey Blanchard, Erik Opavsky, and Emircan Uysaler * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda.h> #include <curand.h> #include <cuda_runtime_api.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <sys/time.h> #include <algorithm> //Include various thrust items that are used #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/scan.h> #include <thrust/extrema.h> #include <thrust/pair.h> #include <thrust/transform_reduce.h> #include <thrust/random.h> //various functions, include the functions //that print numbers in binary. #include "printFunctions.cu" //the algorithms #include "bucketMultiselect.cu" //#include "naiveBucketMultiselect.cu" #include "generateProblems.cu" #include "multiselectTimingFunctions.cu" #define NUMBEROFALGORITHMS 2 char* namesOfMultiselectTimingFunctions[NUMBEROFALGORITHMS] = {"Sort and Choose Multiselect", "Bucket Multiselect"}; using namespace std; namespace CompareMultiselect { /* This function compares bucketMultiselect with the other algorithms given in the defined range of kVals and array size. 
*/ template<typename T> void compareMultiselectAlgorithms(uint size, uint* kVals, uint numKs, uint numTests , uint *algorithmsToTest, uint generateType, uint kGenerateType, char* fileNamecsv , T* data = NULL) { // allocate space for operations T *h_vec, *h_vec_copy; float timeArray[NUMBEROFALGORITHMS][numTests]; T * resultsArray[NUMBEROFALGORITHMS][numTests]; float totalTimesPerAlgorithm[NUMBEROFALGORITHMS]; uint winnerArray[numTests]; uint timesWon[NUMBEROFALGORITHMS]; uint i,j,m,x; int runOrder[NUMBEROFALGORITHMS]; unsigned long long seed; //, seed2; results_t<T> *temp; ofstream fileCsv; timeval t1; //, t2; typedef results_t<T>* (*ptrToTimingFunction)(T*, uint, uint *, uint); typedef void (*ptrToGeneratingFunction)(T*, uint, curandGenerator_t); //these are the functions that can be called ptrToTimingFunction arrayOfTimingFunctions[NUMBEROFALGORITHMS] = {&timeSortAndChooseMultiselect<T>, &timeBucketMultiselect<T>}; ptrToGeneratingFunction *arrayOfGenerators; char** namesOfGeneratingFunctions; // this is the array of names of functions that generate problems of this type, // ie float, double, or uint namesOfGeneratingFunctions = returnNamesOfGenerators<T>(); arrayOfGenerators = (ptrToGeneratingFunction *) returnGenFunctions<T>(); printf("Files will be written to %s\n", fileNamecsv); fileCsv.open(fileNamecsv, ios_base::app); //zero out the totals and times won bzero(totalTimesPerAlgorithm, NUMBEROFALGORITHMS * sizeof(uint)); bzero(timesWon, NUMBEROFALGORITHMS * sizeof(uint)); //allocate space for h_vec, and h_vec_copy h_vec = (T *) malloc(size * sizeof(T)); h_vec_copy = (T *) malloc(size * sizeof(T)); //create the random generators. 
curandGenerator_t generator; srand(unsigned(time(NULL))); printf("The distribution is: %s\n", namesOfGeneratingFunctions[generateType]); printf("The k distribution is: %s\n", namesOfKGenerators[kGenerateType]); /***********************************************/ /*********** START RUNNING TESTS ************ /***********************************************/ for(i = 0; i < numTests; i++) { //cudaDeviceReset(); gettimeofday(&t1, NULL); seed = t1.tv_usec * t1.tv_sec; for(m = 0; m < NUMBEROFALGORITHMS;m++) runOrder[m] = m; std::random_shuffle(runOrder, runOrder + NUMBEROFALGORITHMS); fileCsv << size << "," << numKs << "," << namesOfGeneratingFunctions[generateType] << "," << namesOfKGenerators[kGenerateType] << ","; curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator,seed); printf("Running test %u of %u for size: %u and numK: %u\n", i + 1, numTests, size, numKs); //generate the random vector using the specified distribution if(data == NULL) arrayOfGenerators[generateType](h_vec, size, generator); else h_vec = data; //copy the vector to h_vec_copy, which will be used to restore it later memcpy(h_vec_copy, h_vec, size * sizeof(T)); /* *************************************************** ****** In this file, the kDistribution is always set to UNIFORM (kGenerateType = 1) ****** so this regeneration of the order statistics is not needed. ****** It is saved here in case one wants to run these tests for a different kDistribution *************************************************** // if the kdistribution is random, we need to generate new a kList for each new random problem instance. 
if ( (kGenerateType != 1) && (i>0) ){ gettimeofday(&t2, NULL); seed2 = t2.tv_usec * t2.tv_sec; curandGenerator_t generator2; srand(unsigned(time(NULL))); curandCreateGenerator(&generator2, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator2,seed2); arrayOfKDistributionGenerators[kGenerateType](kVals, numKs, size, generator2); } */ winnerArray[i] = 0; float currentWinningTime = INFINITY; //run the various timing functions for(x = 0; x < NUMBEROFALGORITHMS; x++){ j = runOrder[x]; if(algorithmsToTest[j]){ //run timing function j printf("TESTING: %u\n", j); temp = arrayOfTimingFunctions[j](h_vec_copy, size, kVals, numKs); //record the time result timeArray[j][i] = temp->time; //record the value returned resultsArray[j][i] = temp->vals; //update the current "winner" if necessary if(timeArray[j][i] < currentWinningTime){ currentWinningTime = temp->time; winnerArray[i] = j; } //perform clean up free(temp); memcpy(h_vec_copy, h_vec, size * sizeof(T)); } } curandDestroyGenerator(generator); for(x = 0; x < NUMBEROFALGORITHMS; x++) if(algorithmsToTest[x]) fileCsv << namesOfMultiselectTimingFunctions[x] << "," << timeArray[x][i] << ","; // check for errors, and output information to recreate problem uint flag = 0; for(m = 1; m < NUMBEROFALGORITHMS;m++) if(algorithmsToTest[m]) for (j = 0; j < numKs; j++) { if(resultsArray[m][i][j] != resultsArray[0][i][j]) { flag++; fileCsv << "\nERROR ON TEST " << i << " of " << numTests << " tests!!!!!\n"; fileCsv << "vector size = " << size << "\nvector seed = " << seed << "\n"; fileCsv << "numKs = " << numKs << "\n"; fileCsv << "wrong k = " << kVals[j] << " kIndex = " << j << " wrong result = " << resultsArray[m][i][j] << " correct result = " << resultsArray[0][i][j] << "\n"; std::cout <<namesOfMultiselectTimingFunctions[m] << " did not return the correct answer on test " << i + 1 << " at k[" << j << "]. 
It got "<< resultsArray[m][i][j]; std::cout << " instead of " << resultsArray[0][i][j] << ".\n" ; std::cout << "RESULT:\t"; PrintFunctions::printBinary(resultsArray[m][i][j]); std::cout << "Right:\t"; PrintFunctions::printBinary(resultsArray[0][i][j]); } } fileCsv << flag << "\n"; } //calculate the total time each algorithm took for(i = 0; i < numTests; i++) for(j = 0; j < NUMBEROFALGORITHMS;j++) if(algorithmsToTest[j]) totalTimesPerAlgorithm[j] += timeArray[j][i]; //count the number of times each algorithm won. for(i = 0; i < numTests;i++) timesWon[winnerArray[i]]++; printf("\n\n"); //print out the average times for(i = 0; i < NUMBEROFALGORITHMS; i++) if(algorithmsToTest[i]) printf("%-20s averaged: %f ms\n", namesOfMultiselectTimingFunctions[i], totalTimesPerAlgorithm[i] / numTests); for(i = 0; i < NUMBEROFALGORITHMS; i++) if(algorithmsToTest[i]) printf("%s won %u times\n", namesOfMultiselectTimingFunctions[i], timesWon[i]); // free results for(i = 0; i < numTests; i++) for(m = 0; m < NUMBEROFALGORITHMS; m++) if(algorithmsToTest[m]) free(resultsArray[m][i]); //free h_vec and h_vec_copy if(data == NULL) free(h_vec); free(h_vec_copy); //close the file fileCsv.close(); } /* This function generates the array of kVals to work on and acts as a wrapper for comparison. */ template<typename T> void runTests (uint generateType, char* fileName, uint startPower, uint stopPower , uint timesToTestEachK, uint kDistribution, uint startK, uint stopK, uint kJump) { uint algorithmsToRun[NUMBEROFALGORITHMS]= {1, 1}; uint size; uint i; uint arrayOfKs[stopK+1]; /* ***************************** **** In this file, the kDistribution is not random. Thus the generator **** is only needed to call the function. It's value is irrelevant. 
***************************** */ unsigned long long seed; timeval t1; gettimeofday(&t1, NULL); seed = t1.tv_usec * t1.tv_sec; curandGenerator_t generator; srand(unsigned(time(NULL))); curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator,seed); // double the array size to the next powers of 2 for(size = (1 << startPower); size <= (1 << stopPower); size *= 2) { for(i = startK; i <= stopK; i+=kJump) { cudaDeviceReset(); cudaThreadExit(); arrayOfKDistributionGenerators[kDistribution](arrayOfKs, i, size, generator); compareMultiselectAlgorithms<T>(size, arrayOfKs, i, timesToTestEachK, algorithmsToRun, generateType, kDistribution, fileName); } // end for(i=starK) curandDestroyGenerator(generator); } // end for(size) } // end runTests } // end namespace CompareMultiselect int main (int argc, char *argv[]) { using namespace CompareMultiselect; char *fileName, *hostName, *typeString; fileName = (char*) malloc(128 * sizeof(char)); typeString = (char*) malloc(10 * sizeof(char)); hostName = (char*) malloc(20 * sizeof(char)); gethostname(hostName, 20); time_t rawtime; struct tm * timeinfo; time ( &rawtime ); timeinfo = localtime ( &rawtime ); char * humanTime = asctime(timeinfo); humanTime[strlen(humanTime)-1] = '\0'; uint testCount, type,distributionType,startPower,stopPower,kDistribution,startK ,stopK,jumpK; uint vecDistr[4]; vecDistr[0]=0; vecDistr[1]=1; vecDistr[2]=3; vecDistr[3]=9; kDistribution=1; startPower=26; stopPower=26; startK=100; jumpK=10; stopK=500; testCount=25; for(int j=0; j<4; j++){ distributionType = vecDistr[j]; for(type=0; type<2; type++){ switch(type){ case 0: typeString = "float"; snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); 
runTests<float>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); break; case 1: typeString = "double"; if (distributionType<2){ snprintf(fileName, 128, "%s %s k-dist:%s 2^%d to 2^%d (%d:%d:%d) %d-tests on %s at %s", typeString, getDistributionOptions(type, distributionType), getKDistributionOptions(kDistribution), startPower, stopPower, startK, jumpK, stopK, testCount, hostName, humanTime); printf("File Name: %s \n", fileName); runTests<double>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); } // end if(distributionType) break; case 2: typeString = "uint"; runTests<uint>(distributionType,fileName,startPower,stopPower,testCount, kDistribution,startK,stopK,jumpK); break; default: printf("You entered and invalid option, now exiting\n"); break; } // end switch(type) } // end for(type) } // end for (int j) free (fileName); return 0; }
9ab4c362af3a7be584e2b22e121cab24aa5caf69.hip
// !!! This is a file automatically generated by hipify!!!
#include "LBM3D_1D_indices.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <glm\gtx\norm.hpp>
#include <glm\gtc\matrix_transform.hpp>

#include <iostream>

#include "CUDAUtils.cuh"
#include "ParticleSystem.h"
#include "StreamlineParticleSystem.h"

// Simulation constants; uploaded once from the host before the kernels run.
__constant__ int d_latticeWidth;		//!< Lattice width constant on the device
__constant__ int d_latticeHeight;		//!< Lattice height constant on the device
__constant__ int d_latticeDepth;		//!< Lattice depth constant on the device
__constant__ int d_latticeSize;			//!< Lattice size constant on the device (latticeWidth * latticeHeight * latticeDepth)
__constant__ float d_tau;				//!< Tau value on the device
__constant__ float d_itau;				//!< Inverse tau value (1.0f / tau) on the device
__constant__ float d_worldSizeRatio;	//!< Scale factor between world-space and lattice-space coordinates
__constant__ glm::vec3 d_position;		//!< World-space origin (offset) of the lattice

__device__ int d_respawnY = 0;			//!< Respawn y coordinate on the device, not used (random respawn now used)
__device__ int d_respawnZ = 0;			//!< Respawn z coordinate on the device, not used (random respawn now used)

__constant__ glm::vec3 dirVectorsConst[19];	//!< Per-direction lattice vectors, indexed by the DIR_* constants

//! Returns the flattened index using the device constants and provided coordinates.
//! Layout is x-fastest: index = x + width * (y + height * z).
__device__ int getIdxKer(int x, int y, int z) {
	return (x + d_latticeWidth * (y + d_latticeHeight * z));
}

//! Returns uniform random between 0.0 and 1.0. Provided from different student's work.
//! Deterministic integer hash of (x, y): the same inputs always produce the same value.
// NOTE(review): the intermediate products can overflow signed int, which is formally
// undefined behavior in C++ (in practice it wraps); consider unsigned arithmetic.
__device__ __host__ float rand(int x, int y) {
	int n = x + y * 57;
	n = (n << 13) ^ n;
	return ((1.0f - ((n * (n * n * 15731 + 789221) + 1376312589) & 0x7fffffff) / 1073741824.0f) + 1.0f) * 0.5f;
}

///// Maps the value to the viridis color map.
//__device__ glm::vec3 mapToViridis3D(float val) {
//	val = glm::clamp(val, 0.0f, 1.0f);
//	int discreteVal = (int)(val * 255.0f);
//	return glm::vec3(viridis_cm[discreteVal][0], viridis_cm[discreteVal][1], viridis_cm[discreteVal][2]);
//}

//! Maps the world position vector to the lattice position vector.
__device__ glm::vec3 getLatticePosition(glm::vec3 worldPosition) {
	// TODO - offsets (model matrix?), maybe even scaling (model matrix scale)
	worldPosition -= d_position;
	return (worldPosition / d_worldSizeRatio);
}

//! Maps the lattice position vector to world position vector.
__device__ glm::vec3 getWorldPosition(glm::vec3 latticePosition) {
	latticePosition *= d_worldSizeRatio;
	return (latticePosition + d_position);
}

//! Moves the streamline particles.
/*!
	Advances each streamline by one vertex: the current head vertex is advected by the
	trilinearly interpolated lattice velocity and the result is appended to the
	streamline's slice of the vertex buffer.
	\param[in] streamlineVertices	Vertex buffer; streamline i owns slots [i * maxStreamlineLength, (i + 1) * maxStreamlineLength).
	\param[in] velocities			Per-node lattice velocity field.
	\param[in] streamlineLengths	Index of the current head vertex of each streamline; incremented after each append.
	\param[in] maxStreamlineLength	Capacity (in vertices) of each streamline.
	\param[in] maxNumStreamlines	Number of streamlines.
	\param[in] respawnMode			Unused in this kernel.
	\param[in] outOfBoundsMode		Unused in this kernel.
	\param[in] velocityMultiplier	Artificial multiplier scaling the advection step.
	\param[in] useCorrectInterpolation	true = standard trilinear interpolation; false = legacy variant with mirrored weights.
*/
__global__ void moveStreamlineParticlesKernel(glm::vec3 *streamlineVertices, glm::vec3 *velocities, int *streamlineLengths, int maxStreamlineLength, int maxNumStreamlines, int respawnMode, int outOfBoundsMode, float velocityMultiplier = 1.0f, bool useCorrectInterpolation = true) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;	// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	glm::vec3 adjVelocities[8];

	while (idx < maxNumStreamlines) {

		// FIX: stop one vertex early. We append at (off + 1) below, so a streamline
		// already holding maxStreamlineLength - 1 vertices must not grow further;
		// the previous guard (>= maxStreamlineLength) let the append write into the
		// FIRST slot of the next streamline (or past the buffer for the last one).
		if (streamlineLengths[idx] >= maxStreamlineLength - 1) {
			idx += blockDim.x * blockDim.y * gridDim.x;
			continue;
		}

		int off = idx * maxStreamlineLength + streamlineLengths[idx];	// buffer offset for current vertex of the streamline with idx

		glm::vec3 pos = getLatticePosition(streamlineVertices[off]);

		// Streamlines that left the lattice stop growing; their recorded length is
		// kept so the existing line segments can still be rendered cleanly.
		if (pos.x < 0.0f || pos.x > d_latticeWidth - 1 ||
			pos.y < 0.0f || pos.y > d_latticeHeight - 1 ||
			pos.z < 0.0f || pos.z > d_latticeDepth - 1) {
			idx += blockDim.x * blockDim.y * gridDim.x;
			continue;
		}

		// Corner indices of the enclosing cell; the far corner wraps to 0 at the border.
		int leftX = (int)pos.x;
		int rightX = leftX + 1;
		if (rightX > d_latticeWidth - 1) {
			rightX = 0;
		}
		int bottomY = (int)pos.y;
		int topY = bottomY + 1;
		if (topY > d_latticeHeight - 1) {
			topY = 0;
		}
		int frontZ = (int)pos.z;
		int backZ = frontZ + 1;
		if (backZ > d_latticeDepth - 1) {
			backZ = 0;
		}

		// Velocities at the 8 corners of the enclosing cell (V<xyz> naming).
		adjVelocities[0] = velocities[getIdxKer(leftX, topY, frontZ)];		// 0: V010
		adjVelocities[1] = velocities[getIdxKer(rightX, topY, frontZ)];		// 1: V110
		adjVelocities[2] = velocities[getIdxKer(leftX, bottomY, frontZ)];	// 2: V000
		adjVelocities[3] = velocities[getIdxKer(rightX, bottomY, frontZ)];	// 3: V100
		adjVelocities[4] = velocities[getIdxKer(leftX, topY, backZ)];		// 4: V011
		adjVelocities[5] = velocities[getIdxKer(rightX, topY, backZ)];		// 5: V111
		adjVelocities[6] = velocities[getIdxKer(leftX, bottomY, backZ)];	// 6: V001
		adjVelocities[7] = velocities[getIdxKer(rightX, bottomY, backZ)];	// 7: V101

		// Fractional position of the particle inside the cell.
		float horizontalRatio = pos.x - (float)leftX;
		float verticalRatio = pos.y - (float)bottomY;
		float depthRatio = pos.z - (float)frontZ;

		glm::vec3 finalVelocity;
		if (useCorrectInterpolation) {
			// Standard trilinear interpolation of the 8 corner velocities.
			finalVelocity =
				adjVelocities[2] * (1.0f - horizontalRatio) * (1.0f - verticalRatio) * (1.0f - depthRatio) +
				adjVelocities[3] * horizontalRatio * (1.0f - verticalRatio) * (1.0f - depthRatio) +
				adjVelocities[0] * (1.0f - horizontalRatio) * verticalRatio * (1.0f - depthRatio) +
				adjVelocities[6] * (1.0f - horizontalRatio) * (1.0f - verticalRatio) * depthRatio +
				adjVelocities[7] * horizontalRatio * (1.0f - verticalRatio) * depthRatio +
				adjVelocities[4] * (1.0f - horizontalRatio) * verticalRatio * depthRatio +
				adjVelocities[1] * horizontalRatio * verticalRatio * (1.0f - depthRatio) +
				adjVelocities[5] * horizontalRatio * verticalRatio * depthRatio;
		} else {
			// Legacy variant kept for comparison: the weights are applied to the
			// mirrored corners (left corner weighted by horizontalRatio, etc.).
			glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio);
			glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio);
			glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio);
			glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * (1.0f - horizontalRatio);
			glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio);
			glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio);
			finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio);
		}

		finalVelocity *= velocityMultiplier;
		pos += finalVelocity;

		// Append the advected vertex and advance the head index.
		streamlineVertices[off + 1] = getWorldPosition(pos);
		streamlineLengths[idx]++;

		idx += blockDim.x * blockDim.y * gridDim.x;
	}
}

//! Kernel for moving particles that uses OpenGL interoperability.
/*!
	If the particles venture beyond the simulation bounding volume, they are respawned.
	Out of bounds mode is not used (not implemented) yet.
	\param[in] particleVertices		Vertices (positions stored in VBO) of particles to be updated/moved.
	\param[in] velocities			Array of velocities that will act on the particles.
	\param[in] numActiveParticles	Number of active particles that should be moved.
	\param[in] particleColors		--- OLD --- VBO of particle colors.
	\param[in] respawnMode			Determines how the particles are respawned.
	\param[in] outOfBoundsMode		--- NOT IMPLEMENTED --- Determines how are particles that are out of bounds treated.
	\param[in] velocityMultiplier	Artifical multiplier that determines how much the particles are moved.
	\param[in] useCorrectInterpolation	Determines mode of trilinear interpolation.
*/
__global__ void moveParticlesKernelInteropNew2(glm::vec3 *particleVertices, glm::vec3 *velocities, /*int *numParticles*/ int numActiveParticles, glm::vec3 *particleColors, int respawnMode, int outOfBoundsMode, float velocityMultiplier = 1.0f, bool useCorrectInterpolation = true) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;	// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	glm::vec3 adjVelocities[8];

	// Each thread walks the particle array with a grid-sized stride.
	while (idx < numActiveParticles) {

		//if (isnan(particleVertices[idx].x) || isnan(particleVertices[idx].y) || isnan(particleVertices[idx].z) ||
		//	isinf(particleVertices[idx].x) || isinf(particleVertices[idx].y) || isinf(particleVertices[idx].z)) {
		//	particleVertices[idx] = glm::vec3(0.0f);
		//	continue;
		//}

		// Work in lattice space; converted back to world space when stored.
		glm::vec3 pos = getLatticePosition(particleVertices[idx]);

		// Reset degenerate particles to the world origin.
		// NOTE(review): `continue` does not advance idx, so this slot is immediately
		// reprocessed; the loop only makes progress because the reset position maps
		// to a finite lattice position — verify d_worldSizeRatio != 0.
		if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z) ||
			isinf(pos.x) || isinf(pos.y) || isinf(pos.z)) {
			particleVertices[idx] = glm::vec3(0.0f);
			continue;
		}

		// Out-of-bounds handling: wrap and/or respawn depending on respawnMode.
		if (pos.x < 0.0f || pos.x > d_latticeWidth - 1 || pos.y < 0.0f || pos.y > d_latticeHeight - 1 || pos.z < 0.0f || pos.z > d_latticeDepth - 1) {
			if (respawnMode <= 1) {
				// x always wraps around the lattice.
				if (pos.x < 0.0f || pos.x > d_latticeWidth - 1) {
					pos.x = fmodf(pos.x + d_latticeWidth - 1, d_latticeWidth - 1);
				}
				if (respawnMode == 0) {
					// Mode 0: y wraps as well.
					if (pos.y < 0.0f || pos.y > d_latticeHeight - 1) {
						pos.y = fmodf(pos.y + d_latticeHeight - 1, d_latticeHeight - 1);
					}
				} else {
					// Mode 1: clamp at the bottom; leaving through the top respawns
					// the particle at x = 0 with randomized y and z.
					if (pos.y < 0.0f) {
						pos.y = 0.0f;
					}
					if (pos.y > d_latticeHeight - 1) {
						// respawn
						pos.x = 0.0f;
						pos.y = rand(idx, pos.y) * (d_latticeHeight - 1);
						pos.z = rand(idx, pos.z) * (d_latticeDepth - 1);
					}
				}
				if (pos.z < 0.0f || pos.z > d_latticeDepth - 1) {
					pos.z = fmodf(pos.z + d_latticeDepth - 1, d_latticeDepth - 1);
				}
			} else {
				// Other modes: wrap x and y, randomize z.
				//pos.x = 0.0f;
				pos.x = fmodf(pos.x + d_latticeWidth - 1, d_latticeWidth - 1);
				pos.y = fmodf(pos.y + d_latticeHeight - 1, d_latticeHeight - 1);
				pos.z = rand(idx, pos.z) * (d_latticeDepth - 1);
			}
		}

		// Corner indices of the enclosing cell, wrapped at the lattice borders.
		int leftX = (int)pos.x;
		if (leftX < 0) {
			leftX = d_latticeWidth - 1;
		}
		int rightX = leftX + 1;
		if (rightX > d_latticeWidth - 1) {
			rightX = 0;
		}
		int bottomY = (int)pos.y;
		if (bottomY < 0) {
			bottomY = d_latticeHeight - 1;
		}
		int topY = bottomY + 1;
		if (topY > d_latticeHeight - 1) {
			topY = 0;
		}
		int frontZ = (int)pos.z;
		if (frontZ < 0) {
			frontZ = d_latticeDepth - 1;
		}
		int backZ = frontZ + 1;
		if (backZ > d_latticeDepth - 1) {
			backZ = 0;
		}

		// Velocities at the 8 corners of the enclosing cell (V<xyz> naming).
		adjVelocities[0] = velocities[getIdxKer(leftX, topY, frontZ)];		// 0: V010
		adjVelocities[1] = velocities[getIdxKer(rightX, topY, frontZ)];		// 1: V110
		adjVelocities[2] = velocities[getIdxKer(leftX, bottomY, frontZ)];	// 2: V000
		adjVelocities[3] = velocities[getIdxKer(rightX, bottomY, frontZ)];	// 3: V100
		adjVelocities[4] = velocities[getIdxKer(leftX, topY, backZ)];		// 4: V011
		adjVelocities[5] = velocities[getIdxKer(rightX, topY, backZ)];		// 5: V111
		adjVelocities[6] = velocities[getIdxKer(leftX, bottomY, backZ)];	// 6: V001
		adjVelocities[7] = velocities[getIdxKer(rightX, bottomY, backZ)];	// 7: V101

		// Fractional position of the particle inside the cell.
		float horizontalRatio = pos.x - (float)leftX;
		float verticalRatio = pos.y - (float)bottomY;
		float depthRatio = pos.z - (float)frontZ;

		glm::vec3 finalVelocity;
		if (useCorrectInterpolation) {
			// Standard trilinear interpolation of the 8 corner velocities.
			finalVelocity =
				adjVelocities[2] * (1.0f - horizontalRatio) * (1.0f - verticalRatio) * (1.0f - depthRatio) +
				adjVelocities[3] * horizontalRatio * (1.0f - verticalRatio) * (1.0f - depthRatio) +
				adjVelocities[0] * (1.0f - horizontalRatio) * verticalRatio * (1.0f - depthRatio) +
				adjVelocities[6] * (1.0f - horizontalRatio) * (1.0f - verticalRatio) * depthRatio +
				adjVelocities[7] * horizontalRatio * (1.0f - verticalRatio) * depthRatio +
				adjVelocities[4] * (1.0f - horizontalRatio) * verticalRatio * depthRatio +
				adjVelocities[1] * horizontalRatio * verticalRatio * (1.0f - depthRatio) +
				adjVelocities[5] * horizontalRatio * verticalRatio * depthRatio;
			/*
			glm::vec3 topBackVelocity = adjVelocities[1] * horizontalRatio + adjVelocities[0] * (1.0f - horizontalRatio);
			glm::vec3 bottomBackVelocity = adjVelocities[3] * horizontalRatio + adjVelocities[2] * (1.0f - horizontalRatio);
			glm::vec3 backVelocity = topBackVelocity * verticalRatio + bottomBackVelocity * (1.0f - verticalRatio);
			glm::vec3 topFrontVelocity = adjVelocities[5] * horizontalRatio + adjVelocities[4] * (1.0f - horizontalRatio);
			glm::vec3 bottomFrontVelocity = adjVelocities[7] * horizontalRatio + adjVelocities[6] * (1.0f - horizontalRatio);
			glm::vec3 frontVelocity = topFrontVelocity * verticalRatio + bottomFrontVelocity * (1.0f - verticalRatio);
			finalVelocity = frontVelocity * depthRatio + backVelocity * (1.0f - depthRatio);
			*/
		} else {
			// Legacy variant kept for comparison: the weights are applied to the
			// mirrored corners (left corner weighted by horizontalRatio, etc.).
			glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio);
			glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio);
			glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio);
			glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * (1.0f - horizontalRatio);
			glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio);
			glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio);
			finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio);
		}

		finalVelocity *= velocityMultiplier;
		pos += finalVelocity;

		particleVertices[idx] = getWorldPosition(pos);

		idx += blockDim.x * blockDim.y * gridDim.x;

		//if (isnan(particleVertices[idx].x) || isnan(particleVertices[idx].y) || isnan(particleVertices[idx].z) ||
		//	isinf(particleVertices[idx].x) || isinf(particleVertices[idx].y) || isinf(particleVertices[idx].z)) {
		//	//printf("oh no!");
		//	particleVertices[idx] = glm::vec3(0.0f);
		//	continue;
		//}
	}
}

//! Older variant of the kernel above: no NaN/inf check, no corner-index wrapping,
//! bounds checked against (dim - 2), and positions kept directly in lattice space.
__global__ void moveParticlesKernelInteropNew(glm::vec3 *particleVertices, glm::vec3 *velocities, /*int *numParticles*/ int numActiveParticles, glm::vec3 *particleColors, int respawnMode, int outOfBoundsMode, float velocityMultiplier = 1.0f, bool useCorrectInterpolation = true) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;	// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	glm::vec3 adjVelocities[8];

	while (idx < numActiveParticles) {

		glm::vec3 pos = particleVertices[idx];

		// Out-of-bounds handling against the (dim - 2) interior so that the +1
		// corner lookups below stay inside the lattice.
		if (pos.x < 0.0f || pos.x > d_latticeWidth - 2 || pos.y < 0.0f || pos.y > d_latticeHeight - 2 || pos.z < 0.0f || pos.z > d_latticeDepth - 2) {
			if (respawnMode == 0) {
				if (pos.x < 0.0f || pos.x > d_latticeWidth - 2) {
					//pos.x = (float)((__float2int_rd(pos.x) + d_latticeWidth - 2) % (d_latticeWidth - 2));
					pos.x = fmodf(pos.x + d_latticeWidth - 2, d_latticeWidth - 2);
				}
				if (pos.y < 0.0f) {
					pos.y = 0.0f;
				}
				if (pos.y > d_latticeHeight - 2) {
					// respawn
					pos.x = 0.0f;
					pos.y = rand(idx, pos.y) * (d_latticeHeight - 2);
					pos.z = rand(idx, pos.z) * (d_latticeDepth - 2);
				}
				if (pos.z < 0.0f || pos.z > d_latticeDepth - 2) {
					//pos.z = (float)((__float2int_rd(pos.z) + d_latticeDepth - 2) % (d_latticeDepth - 2));
					pos.z = fmodf(pos.z + d_latticeDepth - 2, d_latticeDepth - 2);
				}
			} else {
				//pos.x = 0.0f;
				pos.x = fmodf(pos.x + d_latticeWidth - 2, d_latticeWidth - 2);
				//pos.y = (float)((__float2int_rd(pos.y) + d_latticeHeight - 2) % (d_latticeHeight - 2));
				pos.y = fmodf(pos.y + d_latticeHeight - 2, d_latticeHeight - 2);
				pos.z = rand(idx, pos.z) * (d_latticeDepth - 2);
			}
		}

		// Corner indices of the enclosing cell (no wrapping in this variant).
		int leftX = (int)pos.x;
		int rightX = leftX + 1;
		int bottomY = (int)pos.y;
		int topY = bottomY + 1;
		int frontZ = (int)pos.z;
		int backZ = frontZ + 1;

		adjVelocities[0] = velocities[getIdxKer(leftX, topY, frontZ)];
		adjVelocities[1] = velocities[getIdxKer(rightX, topY, frontZ)];
		adjVelocities[2] = velocities[getIdxKer(leftX, bottomY, frontZ)];
		adjVelocities[3] = velocities[getIdxKer(rightX, bottomY, frontZ)];
		adjVelocities[4] = velocities[getIdxKer(leftX, topY, backZ)];
		adjVelocities[5] = velocities[getIdxKer(rightX, topY, backZ)];
		adjVelocities[6] = velocities[getIdxKer(leftX, bottomY, backZ)];
		adjVelocities[7] = velocities[getIdxKer(rightX, bottomY, backZ)];

		// Fractional position of the particle inside the cell.
		float horizontalRatio = pos.x - leftX;
		float verticalRatio = pos.y - bottomY;
		float depthRatio = pos.z - frontZ;

		glm::vec3 finalVelocity;
		// NOTE(review): despite the misleading local names (front/back swapped), the
		// first branch weights the far corners by their ratios (standard trilinear),
		// while the else branch applies the weights to the mirrored corners.
		if (useCorrectInterpolation) {
			glm::vec3 topBackVelocity = adjVelocities[1] * horizontalRatio + adjVelocities[0] * (1.0f - horizontalRatio);
			glm::vec3 bottomBackVelocity = adjVelocities[3] * horizontalRatio + adjVelocities[2] * (1.0f - horizontalRatio);
			glm::vec3 backVelocity = topBackVelocity * verticalRatio + bottomBackVelocity * (1.0f - verticalRatio);
			glm::vec3 topFrontVelocity = adjVelocities[5] * horizontalRatio + adjVelocities[4] * (1.0f - horizontalRatio);
			glm::vec3 bottomFrontVelocity = adjVelocities[7] * horizontalRatio + adjVelocities[6] * (1.0f - horizontalRatio);
			glm::vec3 frontVelocity = topFrontVelocity * verticalRatio + bottomFrontVelocity * (1.0f - verticalRatio);
			finalVelocity = frontVelocity * depthRatio + backVelocity * (1.0f - depthRatio);
		} else {
			glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio);
			glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio);
			glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio);
			glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * (1.0f - horizontalRatio);
			glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio);
			glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio);
			finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio);
		}

		finalVelocity *= velocityMultiplier;
		pos += finalVelocity;
		particleVertices[idx] = pos;

		idx += blockDim.x * blockDim.y * gridDim.x;
	}
}

//! Kernel for moving particles that uses OpenGL interoperability.
/*!
	Kernel for moving particles that uses OpenGL interoperability for setting particle positions and colors.
	If the particles venture beyond the simulation bounding volume, they are randomly respawned.
	If we use side mirroring (cycling), particles that go beyond side walls (on the z axis) will be
	mirrored/cycled to the other side of the bounding volume.
	\param[in] particleVertices		Vertices (positions stored in VBO) of particles to be updated/moved.
	\param[in] velocities			Array of velocities that will act on the particles.
	\param[in] numActiveParticles	Number of active particles.
	\param[in] particleColors		VBO of particle colors.
*/
__global__ void moveParticlesKernelInterop(glm::vec3 *particleVertices, glm::vec3 *velocities, /*int *numParticles*/ int numActiveParticles, glm::vec3 *particleColors, int respawnMode, int outOfBoundsMode) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;	// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	glm::vec3 adjVelocities[8];

	while (idx < numActiveParticles) {

		// SOLVES CRASHES WITH STLP
		// Respawn (or skip, when outOfBoundsMode == 0) particles outside the lattice
		// BEFORE sampling velocities, so the corner lookups below cannot index garbage.
		if (particleVertices[idx].x < 0.0f || particleVertices[idx].x > d_latticeWidth - 1 ||
			particleVertices[idx].y < 0.0f || particleVertices[idx].y > d_latticeHeight - 1 ||
			particleVertices[idx].z < 0.0f || particleVertices[idx].z > d_latticeDepth - 1) {

			if (outOfBoundsMode == 0) {
				idx += blockDim.x * blockDim.y * gridDim.x;
				continue; // beware - while cycle goes through multiple particles!
			}
			// Respawn at x = 0 with randomized y and z.
			particleVertices[idx].x = 0.0f;
			//particleVertices[idx].y = y;
			particleVertices[idx].y = rand(idx, particleVertices[idx].y) * (d_latticeHeight - 1);
			//particleVertices[idx].z = z;
			particleVertices[idx].z = rand(idx, particleVertices[idx].z) * (d_latticeDepth - 1);
			//particleVertices[idx].y = d_respawnY;
			//particleVertices[idx].z = d_respawnZ++;
		}

		float x = particleVertices[idx].x;
		float y = particleVertices[idx].y;
		float z = particleVertices[idx].z;

		// NOTE(review): a position exactly at dim - 1 passes the bounds check above,
		// making rightX/topY/backZ equal to dim — getIdxKer then indexes outside the
		// intended cell (no wrapping in this kernel, unlike the variants above); verify.
		int leftX = (int)x;
		int rightX = leftX + 1;
		int bottomY = (int)y;
		int topY = bottomY + 1;
		int frontZ = (int)z;
		int backZ = frontZ + 1;

		// Velocities at the 8 corners of the enclosing cell.
		adjVelocities[0] = velocities[getIdxKer(leftX, topY, frontZ)];
		adjVelocities[1] = velocities[getIdxKer(rightX, topY, frontZ)];
		adjVelocities[2] = velocities[getIdxKer(leftX, bottomY, frontZ)];
		adjVelocities[3] = velocities[getIdxKer(rightX, bottomY, frontZ)];
		adjVelocities[4] = velocities[getIdxKer(leftX, topY, backZ)];
		adjVelocities[5] = velocities[getIdxKer(rightX, topY, backZ)];
		adjVelocities[6] = velocities[getIdxKer(leftX, bottomY, backZ)];
		adjVelocities[7] = velocities[getIdxKer(rightX, bottomY, backZ)];

		// Fractional position of the particle inside the cell.
		float horizontalRatio = x - leftX;
		float verticalRatio = y - bottomY;
		float depthRatio = z - frontZ;

		// Interpolate the cell velocity (weights applied to the mirrored corners,
		// matching the legacy branch of the newer kernels above).
		glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio);
		glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio);
		glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio);
		glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * (1.0f - horizontalRatio);
		glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio);
		glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio);

		glm::vec3 finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio);

		particleVertices[idx] += finalVelocity;

		// Re-check bounds after the move; respawn at x = 0, optionally with random z.
		if (particleVertices[idx].x < 0.0f || particleVertices[idx].x > d_latticeWidth - 1 ||
			particleVertices[idx].y < 0.0f || particleVertices[idx].y > d_latticeHeight - 1 ||
			particleVertices[idx].z < 0.0f || particleVertices[idx].z > d_latticeDepth - 1) {
			particleVertices[idx].x = 0.0f;
			//particleVertices[idx].y = y;
			//particleVertices[idx].y = rand(idx, y) * (d_latticeHeight - 1);
			//particleVertices[idx].z = z;
			if (respawnMode == 1) {
				particleVertices[idx].z = rand(idx, z) * (d_latticeDepth - 1); // comment this out if you want to respawn at same z
			}
			//particleVertices[idx].y = d_respawnY;
			//particleVertices[idx].z = d_respawnZ++;
		}

		idx += blockDim.x * blockDim.y * gridDim.x;
	}
}

//! Kernel for clearing the back lattice.
/*!
	Kernel that clears the back lattice.
	\param[in] backLattice	Pointer to the back lattice to be cleared.
*/
__global__ void clearBackLatticeKernel(Node3D *backLattice) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;	// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	//if (idx == 0) {
	//	printf("d_latticeSize = %d\n", d_latticeSize);
	//}

	if (idx < d_latticeSize) {
		// Zero all 19 distribution values of this node.
		for (int i = 0; i < 19; i++) {
			backLattice[idx].adj[i] = 0.0f;
		}
	}
}

//! Kernel for updating the inlets.
/*!
	Kernel for updating the inlets. Acts the same way as collision step but with predetermined
	velocity and density. The inlet is the left wall of the simulation bounding volume.
	\param[in] backLattice		The back lattice where we update node values.
	\param[in] velocities		Velocities array for the lattice.
	\param[in] inletVelocity	Our desired inlet velocity.
*/
__global__ void updateInletsKernel(Node3D *backLattice, glm::vec3 *velocities, glm::vec3 inletVelocity, glm::vec3 *inletVelocities = nullptr, int xLeftInlet = 1, int xRightInlet = 0, int yBottomInlet = 0, int yTopInlet = 0, int zLeftInlet = 0, int zRightInlet = 0) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;	// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	if (idx < d_latticeSize) {

		// Recover the 3D lattice coordinates from the flattened index.
		int x = idx % d_latticeWidth;
		int y = (idx / d_latticeWidth) % d_latticeHeight;
		int z = idx / (d_latticeHeight * d_latticeWidth);

		// A node is an inlet node if it lies on any of the enabled boundary walls.
		bool shouldBeSet = false;
		if (xLeftInlet && x == 0) {
			shouldBeSet = true;
		}
		if (xRightInlet && x == d_latticeWidth - 1) {
			shouldBeSet = true;
		}
		if (yBottomInlet && y == 0) {
			shouldBeSet = true;
		}
		if (yTopInlet && y == d_latticeHeight - 1) {
			shouldBeSet = true;
		}
		if (zLeftInlet && z == 0) {
			shouldBeSet = true;
		}
		if (zRightInlet && z == d_latticeDepth - 1) {
			shouldBeSet = true;
		}

		if (shouldBeSet) {

			//#define USE_SOUNDING_VELOCITIES
#ifdef USE_SOUNDING_VELOCITIES
			// Optional compile-time mode: take a per-height inlet velocity instead.
			inletVelocity = inletVelocities[y];
#endif

			// Equilibrium distributions for a fixed density of 1.0 and the inlet velocity:
			// each direction gets w * rho * (1 + 3(e.u) + 4.5(e.u)^2 - 1.5(u.u)).
			float macroDensity = 1.0f;
			//glm::vec3 macroVelocity = inletVelocity;

			float leftTermMiddle = WEIGHT_MIDDLE * macroDensity;
			float leftTermAxis = WEIGHT_AXIS * macroDensity;
			float leftTermNonaxial = WEIGHT_NON_AXIAL * macroDensity;

			float macroVelocityDot = glm::dot(inletVelocity, inletVelocity);
			float thirdTerm = 1.5f * macroVelocityDot;

			float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm);

			// Axis (face) directions.
			float dotProd = glm::dot(dirVectorsConst[DIR_RIGHT_FACE], inletVelocity);
			float firstTerm = 3.0f * dotProd;
			float secondTerm = 4.5f * dotProd * dotProd;
			float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_LEFT_FACE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_FRONT_FACE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float frontEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_BACK_FACE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float backEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_TOP_FACE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FACE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

			// Non-axial (edge) directions.
			dotProd = glm::dot(dirVectorsConst[DIR_BACK_RIGHT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float backRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_BACK_LEFT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float backLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_FRONT_RIGHT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float frontRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_FRONT_LEFT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float frontLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_TOP_BACK_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float topBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_TOP_FRONT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float topFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_BACK_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float bottomBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FRONT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float bottomFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_TOP_RIGHT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float topRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_TOP_LEFT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float topLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float bottomRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_LEFT_EDGE], inletVelocity);
			firstTerm = 3.0f * dotProd;
			secondTerm = 4.5f * dotProd * dotProd;
			float bottomLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

			// Overwrite all 19 distributions of this inlet node with the equilibrium.
			backLattice[idx].adj[DIR_MIDDLE_VERTEX] = middleEq;
			backLattice[idx].adj[DIR_RIGHT_FACE] = rightEq;
			backLattice[idx].adj[DIR_LEFT_FACE] = leftEq;
			backLattice[idx].adj[DIR_BACK_FACE] = backEq;
			backLattice[idx].adj[DIR_FRONT_FACE] = frontEq;
			backLattice[idx].adj[DIR_TOP_FACE] = topEq;
			backLattice[idx].adj[DIR_BOTTOM_FACE] = bottomEq;
			backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] = backRightEq;
			backLattice[idx].adj[DIR_BACK_LEFT_EDGE] = backLeftEq;
			backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] = frontRightEq;
			backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] = frontLeftEq;
			backLattice[idx].adj[DIR_TOP_BACK_EDGE] = topBackEq;
			backLattice[idx].adj[DIR_TOP_FRONT_EDGE] = topFrontEq;
			backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] = bottomBackEq;
			backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] = bottomFrontEq;
			backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] = topRightEq;
			backLattice[idx].adj[DIR_TOP_LEFT_EDGE] = topLeftEq;
			backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] = bottomRightEq;
			backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] = bottomLeftEq;

			/*for (int i = 0; i < 19; i++) {
				if (backLattice[idx].adj[i] < 0.0f) {
					backLattice[idx].adj[i] = 0.0f;
				} else if (backLattice[idx].adj[i] > 1.0f) {
					backLattice[idx].adj[i] = 1.0f;
				}
			}*/
		}
	}
}

//! Kernel for calculating the collision operator.
/*!
	Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator.
	\param[in] backLattice	Back lattice in which we do our calculations.
	\param[in] velocities	Velocities array for the lattice.
*/
__global__ void collisionStepKernel(Node3D *backLattice, glm::vec3 *velocities, int useSubgridModel = 0) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y; // index within the block
	idx += blockDim.x * blockDim.y * blockIdx.x;      // flat global lattice index

	if (idx < d_latticeSize) {

		// Macroscopic density: sum of all 19 distribution functions of this node.
		float rho = 0.0f;
		for (int i = 0; i < 19; i++) {
			rho += backLattice[idx].adj[i];
		}

		// Macroscopic velocity: (1/rho) * sum_i e_i * f_i. The middle direction is
		// skipped (its contribution was commented out in the original; presumably
		// its direction vector is zero -- TODO confirm).
		glm::vec3 u = glm::vec3(0.0f, 0.0f, 0.0f);
		//u += vMiddle * backLattice[idx].adj[DIR_MIDDLE];
		u += dirVectorsConst[DIR_LEFT_FACE] * backLattice[idx].adj[DIR_LEFT_FACE];
		u += dirVectorsConst[DIR_FRONT_FACE] * backLattice[idx].adj[DIR_FRONT_FACE];
		u += dirVectorsConst[DIR_BOTTOM_FACE] * backLattice[idx].adj[DIR_BOTTOM_FACE];
		u += dirVectorsConst[DIR_FRONT_LEFT_EDGE] * backLattice[idx].adj[DIR_FRONT_LEFT_EDGE];
		u += dirVectorsConst[DIR_BACK_LEFT_EDGE] * backLattice[idx].adj[DIR_BACK_LEFT_EDGE];
		u += dirVectorsConst[DIR_BOTTOM_LEFT_EDGE] * backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE];
		u += dirVectorsConst[DIR_TOP_LEFT_EDGE] * backLattice[idx].adj[DIR_TOP_LEFT_EDGE];
		u += dirVectorsConst[DIR_BOTTOM_FRONT_EDGE] * backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE];
		u += dirVectorsConst[DIR_TOP_FRONT_EDGE] * backLattice[idx].adj[DIR_TOP_FRONT_EDGE];
		u += dirVectorsConst[DIR_RIGHT_FACE] * backLattice[idx].adj[DIR_RIGHT_FACE];
		u += dirVectorsConst[DIR_BACK_FACE] * backLattice[idx].adj[DIR_BACK_FACE];
		u += dirVectorsConst[DIR_TOP_FACE] * backLattice[idx].adj[DIR_TOP_FACE];
		u += dirVectorsConst[DIR_BACK_RIGHT_EDGE] * backLattice[idx].adj[DIR_BACK_RIGHT_EDGE];
		u += dirVectorsConst[DIR_FRONT_RIGHT_EDGE] * backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE];
		u += dirVectorsConst[DIR_TOP_RIGHT_EDGE] * backLattice[idx].adj[DIR_TOP_RIGHT_EDGE];
		u += dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE] * backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE];
		u += dirVectorsConst[DIR_TOP_BACK_EDGE] * backLattice[idx].adj[DIR_TOP_BACK_EDGE];
		u += dirVectorsConst[DIR_BOTTOM_BACK_EDGE] * backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE];
		u /= rho;

		velocities[idx] = u;

		// BGK equilibria: f_eq = w * rho * (1 + 3(e.u) + 4.5(e.u)^2 - 1.5(u.u)).
		float wRhoMiddle = WEIGHT_MIDDLE * rho;
		float wRhoAxis = WEIGHT_AXIS * rho;
		float wRhoEdge = WEIGHT_NON_AXIAL * rho;
		float uSqTerm = 1.5f * glm::dot(u, u);

		float middleEq = wRhoMiddle + wRhoMiddle * (-uSqTerm);

		float eu = glm::dot(dirVectorsConst[DIR_RIGHT_FACE], u);
		float rightEq = wRhoAxis + wRhoAxis * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_LEFT_FACE], u);
		float leftEq = wRhoAxis + wRhoAxis * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_FRONT_FACE], u);
		float frontEq = wRhoAxis + wRhoAxis * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_BACK_FACE], u);
		float backEq = wRhoAxis + wRhoAxis * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_TOP_FACE], u);
		float topEq = wRhoAxis + wRhoAxis * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_FACE], u);
		float bottomEq = wRhoAxis + wRhoAxis * (3.0f * eu + 4.5f * eu * eu - uSqTerm);

		eu = glm::dot(dirVectorsConst[DIR_BACK_RIGHT_EDGE], u);
		float backRightEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_BACK_LEFT_EDGE], u);
		float backLeftEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_FRONT_RIGHT_EDGE], u);
		float frontRightEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_FRONT_LEFT_EDGE], u);
		float frontLeftEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_TOP_BACK_EDGE], u);
		float topBackEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_TOP_FRONT_EDGE], u);
		float topFrontEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_BACK_EDGE], u);
		float bottomBackEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_FRONT_EDGE], u);
		float bottomFrontEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_TOP_RIGHT_EDGE], u);
		float topRightEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_TOP_LEFT_EDGE], u);
		float topLeftEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE], u);
		float bottomRightEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_LEFT_EDGE], u);
		float bottomLeftEq = wRhoEdge + wRhoEdge * (3.0f * eu + 4.5f * eu * eu - uSqTerm);

		// Relaxation rate: constant 1/tau, or the locally adjusted Smagorinsky rate
		// when the subgrid model is enabled.
		float itau = d_itau;

		if (useSubgridModel) {
			// Non-equilibrium part of each of the 19 distribution functions.
			float f[19];
			f[0] = (backLattice[idx].adj[DIR_MIDDLE_VERTEX] - middleEq);
			f[1] = (backLattice[idx].adj[DIR_RIGHT_FACE] - rightEq);
			f[2] = (backLattice[idx].adj[DIR_LEFT_FACE] - leftEq);
			f[3] = (backLattice[idx].adj[DIR_BACK_FACE] - backEq);
			f[4] = (backLattice[idx].adj[DIR_FRONT_FACE] - frontEq);
			f[5] = (backLattice[idx].adj[DIR_TOP_FACE] - topEq);
			f[6] = (backLattice[idx].adj[DIR_BOTTOM_FACE] - bottomEq);
			f[7] = (backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq);
			f[8] = (backLattice[idx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq);
			f[9] = (backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq);
			f[10] = (backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq);
			f[11] = (backLattice[idx].adj[DIR_TOP_BACK_EDGE] - topBackEq);
			f[12] = (backLattice[idx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq);
			f[13] = (backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq);
			f[14] = (backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq);
			f[15] = (backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq);
			f[16] = (backLattice[idx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq);
			f[17] = (backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq);
			f[18] = (backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq);

			// Strain-rate tensor estimate from the non-equilibrium parts.
			// NOTE(review): sxy sums f[15..18] without sign alternation, unlike sxz
			// and szy -- verify against the axis conventions of the direction set.
			float scale = -1.0f / (2.0f * d_tau * rho);
			float sxx = f[3] + f[4] + f[7] + f[8] + f[9] + f[10] + f[15] + f[16] + f[17] + f[18];
			float sxz = f[9] - f[8] - f[10] + f[7];
			float sxy = f[15] + f[16] + f[17] + f[18];
			float szz = f[1] + f[2] + f[7] + f[8] + f[9] + f[10] + f[11] + f[12] + f[13] + f[14];
			float szy = f[12] + f[13] - f[14] - f[11];
			float syy = f[5] + f[6] + f[11] + f[12] + f[13] + f[14] + f[15] + f[16] + f[17] + f[18];
			sxx *= scale;
			sxz *= scale;
			sxy *= scale;
			szz *= scale;
			szy *= scale;
			syy *= scale;

			// NOTE(review): the inner sqrtf over the off-diagonal terms differs from
			// the usual |S| = sqrt(2*(Sxx^2 + Syy^2 + Szz^2 + 2*(Sxy^2 + Sxz^2 + Szy^2)));
			// kept as-is -- confirm against the reference Smagorinsky formulation.
			float magS = sqrtf(2.0f * (sxx * sxx + syy * syy + szz * szz + 2.0f * sqrtf(sxy * sxy + sxz * sxz + szy * szy)));
			float nu = (2.0f * d_tau - 1.0f) / 6.0f;
			itau = 1.0f / (3.0f * (nu + SMAG_C * SMAG_C * magS) + 0.5f);
		}

		// BGK relaxation towards equilibrium for all 19 directions.
		backLattice[idx].adj[DIR_MIDDLE_VERTEX] -= itau * (backLattice[idx].adj[DIR_MIDDLE_VERTEX] - middleEq);
		backLattice[idx].adj[DIR_RIGHT_FACE] -= itau * (backLattice[idx].adj[DIR_RIGHT_FACE] - rightEq);
		backLattice[idx].adj[DIR_LEFT_FACE] -= itau * (backLattice[idx].adj[DIR_LEFT_FACE] - leftEq);
		backLattice[idx].adj[DIR_BACK_FACE] -= itau * (backLattice[idx].adj[DIR_BACK_FACE] - backEq);
		backLattice[idx].adj[DIR_FRONT_FACE] -= itau * (backLattice[idx].adj[DIR_FRONT_FACE] - frontEq);
		backLattice[idx].adj[DIR_TOP_FACE] -= itau * (backLattice[idx].adj[DIR_TOP_FACE] - topEq);
		backLattice[idx].adj[DIR_BOTTOM_FACE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_FACE] - bottomEq);
		backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq);
		backLattice[idx].adj[DIR_BACK_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq);
		backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq);
		backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq);
		backLattice[idx].adj[DIR_TOP_BACK_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_BACK_EDGE] - topBackEq);
		backLattice[idx].adj[DIR_TOP_FRONT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq);
		backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq);
		backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq);
		backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq);
		backLattice[idx].adj[DIR_TOP_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq);
		backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq);
		backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq);

		/*for (int i = 0; i < 19; i++) {
			if (backLattice[idx].adj[i] < 0.0f) {
				backLattice[idx].adj[i] = 0.0f;
			} else if (backLattice[idx].adj[i] > 1.0f) {
				backLattice[idx].adj[i] = 1.0f;
			}
		}*/
	}
}

//! Kernel for calculating the collision operator that uses the shared memory (in naive manner).
/*!	Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator.
	\param[in] backLattice		Back lattice in which we do our calculations.
	\param[in] velocities		Velocities array for the lattice.
*/
__global__ void collisionStepKernelSharedNewReorganized(Node3D *backLattice, glm::vec3 *velocities, int useSubgridModel = 0) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y; // index within the block
	idx += blockDim.x * blockDim.y * blockIdx.x;      // flat global lattice index

	extern __shared__ Node3D cache[];
	int cacheIdx = threadIdx.x + blockDim.x * threadIdx.y;

	if (idx < d_latticeSize) {

		// Stage this node in shared memory; each thread works exclusively on its
		// own slot, so no block-wide barrier is needed.
		cache[cacheIdx] = backLattice[idx];

		// Macroscopic density: sum of all 19 distribution functions.
		float rho = 0.0f;
		for (int i = 0; i < 19; i++) {
			rho += cache[cacheIdx].adj[i];
		}

		// Macroscopic velocity: (1/rho) * sum_i e_i * f_i (middle direction skipped).
		glm::vec3 u = glm::vec3(0.0f, 0.0f, 0.0f);
		u += dirVectorsConst[DIR_LEFT_FACE] * cache[cacheIdx].adj[DIR_LEFT_FACE];
		u += dirVectorsConst[DIR_FRONT_FACE] * cache[cacheIdx].adj[DIR_FRONT_FACE];
		u += dirVectorsConst[DIR_BOTTOM_FACE] * cache[cacheIdx].adj[DIR_BOTTOM_FACE];
		u += dirVectorsConst[DIR_FRONT_LEFT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE];
		u += dirVectorsConst[DIR_BACK_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE];
		u += dirVectorsConst[DIR_BOTTOM_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE];
		u += dirVectorsConst[DIR_TOP_LEFT_EDGE] * cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE];
		u += dirVectorsConst[DIR_BOTTOM_FRONT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE];
		u += dirVectorsConst[DIR_TOP_FRONT_EDGE] * cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE];
		u += dirVectorsConst[DIR_RIGHT_FACE] * cache[cacheIdx].adj[DIR_RIGHT_FACE];
		u += dirVectorsConst[DIR_BACK_FACE] * cache[cacheIdx].adj[DIR_BACK_FACE];
		u += dirVectorsConst[DIR_TOP_FACE] * cache[cacheIdx].adj[DIR_TOP_FACE];
		u += dirVectorsConst[DIR_BACK_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE];
		u += dirVectorsConst[DIR_FRONT_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE];
		u += dirVectorsConst[DIR_TOP_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE];
		u += dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE];
		u += dirVectorsConst[DIR_TOP_BACK_EDGE] * cache[cacheIdx].adj[DIR_TOP_BACK_EDGE];
		u += dirVectorsConst[DIR_BOTTOM_BACK_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE];
		u /= rho;

		velocities[idx] = u;

		// BGK collision, reorganized: for each direction compute the equilibrium
		// f_eq = w * rho * (1 + 3(e.u) + 4.5(e.u)^2 - 1.5(u.u)) and relax
		// immediately, reusing one weight term per direction group.
		float uSqTerm = 1.5f * glm::dot(u, u);

		// Middle direction.
		float wRho = WEIGHT_MIDDLE * rho;
		float feq = wRho + wRho * (-uSqTerm);
		cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] -= d_itau * (cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] - feq);

		// Axial (face) directions.
		wRho = WEIGHT_AXIS * rho;

		float eu = glm::dot(dirVectorsConst[DIR_RIGHT_FACE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_RIGHT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_RIGHT_FACE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_LEFT_FACE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_LEFT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_LEFT_FACE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_FRONT_FACE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_FRONT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_FACE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_BACK_FACE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_BACK_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_FACE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_TOP_FACE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_TOP_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FACE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_FACE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FACE] - feq);

		// Non-axial (edge) directions.
		wRho = WEIGHT_NON_AXIAL * rho;

		eu = glm::dot(dirVectorsConst[DIR_BACK_RIGHT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_BACK_LEFT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_FRONT_RIGHT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_FRONT_LEFT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_TOP_BACK_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_TOP_FRONT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_BACK_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_FRONT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_TOP_RIGHT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_TOP_LEFT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] - feq);

		eu = glm::dot(dirVectorsConst[DIR_BOTTOM_LEFT_EDGE], u);
		feq = wRho + wRho * (3.0f * eu + 4.5f * eu * eu - uSqTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] - feq);

		// Publish the relaxed node back to global memory.
		backLattice[idx] = cache[cacheIdx];
	}
}

//! Kernel for calculating the collision operator that uses the shared memory (in naive manner).
/*!	Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator.
	\param[in] backLattice		Back lattice in which we do our calculations.
	\param[in] velocities		Velocities array for the lattice.
*/
// Extended variant: each equilibrium additionally includes a third-order
// ("fourthTerm") correction. Local variable names are load-bearing -- the tail
// of this kernel (DIR_BOTTOM_LEFT_EDGE and write-back) continues below.
__global__ void collisionStepKernelSharedNewReorganizedExtended(Node3D *backLattice, glm::vec3 *velocities, int useSubgridModel = 0) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;      // flat global lattice index

	extern __shared__ Node3D cache[];
	int cacheIdx = threadIdx.x + blockDim.x * threadIdx.y;

	if (idx < d_latticeSize) {

		// Stage the node in shared memory; each thread only touches its own slot,
		// so no __syncthreads() is required.
		cache[cacheIdx] = backLattice[idx];

		// Macroscopic density: sum of all 19 distribution functions.
		float macroDensity = 0.0f;
		for (int i = 0; i < 19; i++) {
			macroDensity += cache[cacheIdx].adj[i];
		}

		// Macroscopic velocity (middle direction skipped, as in the other kernels).
		glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f);
		macroVelocity += dirVectorsConst[DIR_LEFT_FACE] * cache[cacheIdx].adj[DIR_LEFT_FACE];
		macroVelocity += dirVectorsConst[DIR_FRONT_FACE] * cache[cacheIdx].adj[DIR_FRONT_FACE];
		macroVelocity += dirVectorsConst[DIR_BOTTOM_FACE] * cache[cacheIdx].adj[DIR_BOTTOM_FACE];
		macroVelocity += dirVectorsConst[DIR_FRONT_LEFT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE];
		macroVelocity += dirVectorsConst[DIR_BACK_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE];
		macroVelocity += dirVectorsConst[DIR_BOTTOM_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE];
		macroVelocity += dirVectorsConst[DIR_TOP_LEFT_EDGE] * cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE];
		macroVelocity += dirVectorsConst[DIR_BOTTOM_FRONT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE];
		macroVelocity += dirVectorsConst[DIR_TOP_FRONT_EDGE] * cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE];
		macroVelocity += dirVectorsConst[DIR_RIGHT_FACE] * cache[cacheIdx].adj[DIR_RIGHT_FACE];
		macroVelocity += dirVectorsConst[DIR_BACK_FACE] * cache[cacheIdx].adj[DIR_BACK_FACE];
		macroVelocity += dirVectorsConst[DIR_TOP_FACE] * cache[cacheIdx].adj[DIR_TOP_FACE];
		macroVelocity += dirVectorsConst[DIR_BACK_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE];
		macroVelocity += dirVectorsConst[DIR_FRONT_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE];
		macroVelocity += dirVectorsConst[DIR_TOP_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE];
		macroVelocity += dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE];
		macroVelocity += dirVectorsConst[DIR_TOP_BACK_EDGE] * cache[cacheIdx].adj[DIR_TOP_BACK_EDGE];
		macroVelocity += dirVectorsConst[DIR_BOTTOM_BACK_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE];
		macroVelocity /= macroDensity;

		velocities[idx] = macroVelocity;

		// Per-direction equilibrium with extended (third-order) term:
		//   fourthTerm = (e.u) * ((9(e.u))^2 - 3(u.u))
		// NOTE(review): (9.0f * dotProd)^2 gives 81(e.u)^2 -- confirm the intended
		// coefficients against the reference formulation of this extension.
		float currLeftTerm = WEIGHT_MIDDLE * macroDensity;
		float macroVelocityDot = glm::dot(macroVelocity, macroVelocity);
		float thirdTerm = 1.5f * macroVelocityDot;

		float currEq = currLeftTerm + currLeftTerm * (-thirdTerm);
		cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] -= d_itau * (cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] - currEq);

		currLeftTerm = WEIGHT_AXIS * macroDensity;

		float dotProd = glm::dot(dirVectorsConst[DIR_RIGHT_FACE], macroVelocity);
		float firstTerm = 3.0f * dotProd;
		float secondTerm = 4.5f * dotProd * dotProd;
		float fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		// FIX: fourthTerm was previously computed here but omitted from currEq for
		// this direction only; every other direction includes it.
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_RIGHT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_RIGHT_FACE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_LEFT_FACE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_LEFT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_LEFT_FACE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_FRONT_FACE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_FRONT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_FACE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_BACK_FACE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_BACK_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_FACE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_TOP_FACE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_TOP_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FACE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FACE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FACE] - currEq);

		currLeftTerm = WEIGHT_NON_AXIAL * macroDensity;

		dotProd = glm::dot(dirVectorsConst[DIR_BACK_RIGHT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_BACK_LEFT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_FRONT_RIGHT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_FRONT_LEFT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_TOP_BACK_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_TOP_FRONT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_BACK_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FRONT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_TOP_RIGHT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_TOP_LEFT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] - currEq);

		dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] -= d_itau *
(cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] - currEq);
		dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_LEFT_EDGE], macroVelocity);
		firstTerm = 3.0f * dotProd;
		secondTerm = 4.5f * dotProd * dotProd;
		fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
		currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
		cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] - currEq);

		backLattice[idx] = cache[cacheIdx];
	}
}

//! Kernel for calculating the collision operator that uses the shared memory (in naive manner).
/*!
	Calculates the BGK (Bhatnagar-Gross-Krook) collision operator for one D3Q19 lattice node.
	Each thread loads its node into dynamic shared memory, computes the macroscopic density and
	velocity, builds the 19 equilibrium distributions
		eq_i = w_i * rho * (1 + 3 (e_i.u) + 4.5 (e_i.u)^2 - 1.5 u.u)
	and relaxes the node towards them. When useSubgridModel is non-zero, a Smagorinsky-style
	subgrid model recomputes the relaxation rate from the non-equilibrium momentum flux.
	Requires blockDim.x * blockDim.y * sizeof(Node3D) bytes of dynamic shared memory.
	\param[in] backLattice		Back lattice in which we do our calculations.
	\param[in] velocities		Velocities array for the lattice (macroscopic velocity is written here).
	\param[in] useSubgridModel	When non-zero, use the subgrid turbulence model for the relaxation rate.
*/
__global__ void collisionStepKernelShared(Node3D *backLattice, glm::vec3 *velocities, int useSubgridModel = 0) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;		// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	extern __shared__ Node3D cache[];
	int cacheIdx = threadIdx.x + blockDim.x * threadIdx.y;

	if (idx < d_latticeSize) {
		// each thread only ever touches its own shared-memory slot -> no __syncthreads() needed
		cache[cacheIdx] = backLattice[idx];

		// macroscopic density = sum of all 19 distribution values
		float macroDensity = 0.0f;
		for (int i = 0; i < 19; i++) {
			macroDensity += cache[cacheIdx].adj[i];
		}

		// macroscopic velocity = momentum / density; direction 0 is the rest
		// direction and contributes no momentum, hence the loop starts at 1
		glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f);
		for (int i = 1; i < 19; i++) {
			macroVelocity += dirVectorsConst[i] * cache[cacheIdx].adj[i];
		}
		macroVelocity /= macroDensity;
		velocities[idx] = macroVelocity;

		float macroVelocityDot = glm::dot(macroVelocity, macroVelocity);
		float thirdTerm = 1.5f * macroVelocityDot;

		// equilibrium distributions; weights follow the D3Q19 layout used by
		// initLatticeKernel: index 0 = middle, 1-6 = axial faces, 7-18 = edges
		float eq[19];
		eq[DIR_MIDDLE_VERTEX] = WEIGHT_MIDDLE * macroDensity * (1.0f - thirdTerm);
		for (int i = 1; i < 19; i++) {
			float w = (i <= 6) ? WEIGHT_AXIS : WEIGHT_NON_AXIAL;
			float dotProd = glm::dot(dirVectorsConst[i], macroVelocity);
			eq[i] = w * macroDensity * (1.0f + 3.0f * dotProd + 4.5f * dotProd * dotProd - thirdTerm);
		}

		float itau = d_itau;
		if (useSubgridModel) {
			// Smagorinsky-style subgrid model: derive a local relaxation rate from the
			// non-equilibrium part of the distributions (the momentum-flux tensor).
			float f[19];
			for (int i = 0; i < 19; i++) {
				f[i] = cache[cacheIdx].adj[i] - eq[i];
			}

			float tmp = -1.0f / (2.0f * d_tau);

			// tensor component sums; the index sets are taken over verbatim from the
			// original per-direction formulation, only spelled with DIR_ names
			float sxx = f[DIR_BACK_FACE] + f[DIR_FRONT_FACE]
				+ f[DIR_BACK_RIGHT_EDGE] + f[DIR_BACK_LEFT_EDGE] + f[DIR_FRONT_RIGHT_EDGE] + f[DIR_FRONT_LEFT_EDGE]
				+ f[DIR_TOP_RIGHT_EDGE] + f[DIR_TOP_LEFT_EDGE] + f[DIR_BOTTOM_RIGHT_EDGE] + f[DIR_BOTTOM_LEFT_EDGE];
			float sxz = f[DIR_FRONT_RIGHT_EDGE] - f[DIR_BACK_LEFT_EDGE] - f[DIR_FRONT_LEFT_EDGE] + f[DIR_BACK_RIGHT_EDGE];
			float sxy = f[DIR_TOP_RIGHT_EDGE] + f[DIR_TOP_LEFT_EDGE] + f[DIR_BOTTOM_RIGHT_EDGE] + f[DIR_BOTTOM_LEFT_EDGE];
			float szz = f[DIR_RIGHT_FACE] + f[DIR_LEFT_FACE]
				+ f[DIR_BACK_RIGHT_EDGE] + f[DIR_BACK_LEFT_EDGE] + f[DIR_FRONT_RIGHT_EDGE] + f[DIR_FRONT_LEFT_EDGE]
				+ f[DIR_TOP_BACK_EDGE] + f[DIR_TOP_FRONT_EDGE] + f[DIR_BOTTOM_BACK_EDGE] + f[DIR_BOTTOM_FRONT_EDGE];
			float szy = f[DIR_TOP_FRONT_EDGE] + f[DIR_BOTTOM_BACK_EDGE] - f[DIR_BOTTOM_FRONT_EDGE] - f[DIR_TOP_BACK_EDGE];
			float syy = f[DIR_TOP_FACE] + f[DIR_BOTTOM_FACE]
				+ f[DIR_TOP_BACK_EDGE] + f[DIR_TOP_FRONT_EDGE] + f[DIR_BOTTOM_BACK_EDGE] + f[DIR_BOTTOM_FRONT_EDGE]
				+ f[DIR_TOP_RIGHT_EDGE] + f[DIR_TOP_LEFT_EDGE] + f[DIR_BOTTOM_RIGHT_EDGE] + f[DIR_BOTTOM_LEFT_EDGE];

			sxx *= tmp;
			sxz *= tmp;
			sxy *= tmp;
			szz *= tmp;
			szy *= tmp;
			syy *= tmp;

			// NOTE(review): the inner sqrtf over the off-diagonal terms is kept from the
			// original; the textbook |S| = sqrt(2 S_ij S_ij) has no inner root - confirm intent.
			float magS = sqrtf(2.0f * (sxx * sxx + syy * syy + szz * szz + 2.0f * sqrtf(sxy * sxy + sxz * sxz + szy * szy)));

			float nu = (2.0f * d_tau - 1.0f) / 6.0f;	// lattice kinematic viscosity
			itau = 1.0f / (3.0f * (nu + SMAG_C * SMAG_C * magS) + 0.5f);
		}

		// BGK relaxation towards equilibrium for all 19 directions
		for (int i = 0; i < 19; i++) {
			cache[cacheIdx].adj[i] -= itau * (cache[cacheIdx].adj[i] - eq[i]);
		}

		backLattice[idx] = cache[cacheIdx];
	}
}
//!
Kernel for calculating the collision operator using shared memory with smaller register usage.
/*!
	Kernel that calculates the collision operator using the Bhatnagar-Gross-Krook operator.
	Uses shared memory and fewer registers than the naive version by expressing each
	direction's projected velocity with component arithmetic instead of a full dot product.
	Slower than its naive version unfortunately.
	Requires blockDim.x * blockDim.y * sizeof(Node3D) bytes of dynamic shared memory.
	\param[in] backLattice		Back lattice in which we do our calculations.
	\param[in] velocities		Velocities array for the lattice (macroscopic velocity is written here).
*/
__global__ void collisionStepKernelStreamlinedShared(Node3D *backLattice, glm::vec3 *velocities) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;		// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	extern __shared__ Node3D cache[];
	int cacheIdx = threadIdx.x + blockDim.x * threadIdx.y;

	if (idx < d_latticeSize) {
		// each thread only touches its own shared-memory slot -> no synchronization needed
		cache[cacheIdx] = backLattice[idx];

		// macroscopic density = sum of all 19 distribution values
		float macroDensity = 0.0f;
		for (int i = 0; i < 19; i++) {
			macroDensity += cache[cacheIdx].adj[i];
		}

		// macroscopic velocity (direction 0 is the rest direction, no momentum)
		glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f);
		for (int i = 1; i < 19; i++) {
			macroVelocity += dirVectorsConst[i] * cache[cacheIdx].adj[i];
		}
		macroVelocity /= macroDensity;
		velocities[idx] = macroVelocity;

		float leftTermMiddle = WEIGHT_MIDDLE * macroDensity;
		float leftTermAxis = WEIGHT_AXIS * macroDensity;
		float leftTermNonaxial = WEIGHT_NON_AXIAL * macroDensity;

		float thirdTerm = 1.5f * glm::dot(macroVelocity, macroVelocity);

		// eq_i = w_i * rho * (1 + 3 p + 4.5 p^2 - 1.5 |u|^2) where p is the velocity
		// projected onto direction i; p is built from components (register friendly)
		float eq[19];
		eq[DIR_MIDDLE_VERTEX] = leftTermMiddle * (1.0f - thirdTerm);

		float p;	// projected velocity for the current direction
#define EQ_TERM(w) ((w) * (1.0f + 3.0f * p + 4.5f * p * p - thirdTerm))
		p = macroVelocity.x;						eq[DIR_RIGHT_FACE] = EQ_TERM(leftTermAxis);
		p = -macroVelocity.x;						eq[DIR_LEFT_FACE] = EQ_TERM(leftTermAxis);
		p = macroVelocity.z;						eq[DIR_FRONT_FACE] = EQ_TERM(leftTermAxis);
		p = -macroVelocity.z;						eq[DIR_BACK_FACE] = EQ_TERM(leftTermAxis);
		p = macroVelocity.y;						eq[DIR_TOP_FACE] = EQ_TERM(leftTermAxis);
		p = -macroVelocity.y;						eq[DIR_BOTTOM_FACE] = EQ_TERM(leftTermAxis);
		p = macroVelocity.x - macroVelocity.z;		eq[DIR_BACK_RIGHT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = -macroVelocity.x - macroVelocity.z;		eq[DIR_BACK_LEFT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = macroVelocity.x + macroVelocity.z;		eq[DIR_FRONT_RIGHT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = -macroVelocity.x + macroVelocity.z;		eq[DIR_FRONT_LEFT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = macroVelocity.y - macroVelocity.z;		eq[DIR_TOP_BACK_EDGE] = EQ_TERM(leftTermNonaxial);
		p = macroVelocity.y + macroVelocity.z;		eq[DIR_TOP_FRONT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = -macroVelocity.y - macroVelocity.z;		eq[DIR_BOTTOM_BACK_EDGE] = EQ_TERM(leftTermNonaxial);
		p = -macroVelocity.y + macroVelocity.z;		eq[DIR_BOTTOM_FRONT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = macroVelocity.x + macroVelocity.y;		eq[DIR_TOP_RIGHT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = -macroVelocity.x + macroVelocity.y;		eq[DIR_TOP_LEFT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = macroVelocity.x - macroVelocity.y;		eq[DIR_BOTTOM_RIGHT_EDGE] = EQ_TERM(leftTermNonaxial);
		p = -macroVelocity.x - macroVelocity.y;		eq[DIR_BOTTOM_LEFT_EDGE] = EQ_TERM(leftTermNonaxial);
#undef EQ_TERM

		// BGK relaxation towards equilibrium for all 19 directions
		for (int i = 0; i < 19; i++) {
			cache[cacheIdx].adj[i] -= d_itau * (cache[cacheIdx].adj[i] - eq[i]);
		}
		// (a [0,1] clamp loop existed here but was commented out in the original)

		backLattice[idx] = cache[cacheIdx];
	}
}

//! Kernel for updating colliders/obstacles in the lattice.
/*!
	Updates colliders/obstacles by using the full bounce back approach.
	\param[in] backLattice		Back lattice in which we do our calculations.
	\param[in] velocities		Velocities array for the lattice.
	\param[in] heightMap		Height map of the scene.
*/
__global__ void updateCollidersKernel(Node3D *backLattice, glm::vec3 *velocities, float *heightMap) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;		// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	// NOTE(review): the velocities parameter is unused here; kept for launch-site compatibility.
	if (idx < d_latticeSize) {

		int x = idx % d_latticeWidth;
		int y = (idx / d_latticeWidth) % d_latticeHeight;
		int z = idx / (d_latticeHeight * d_latticeWidth);

		// a cell is an obstacle when it lies at or below the terrain height map
		// (with a small epsilon to ignore ~zero heights) or on the lattice floor (y == 0)
		if ((heightMap[x + z * d_latticeWidth] >= y && heightMap[x + z * d_latticeWidth] > 0.01f) || y == 0) {

			// Full bounce-back: swap every distribution value with the one of the
			// opposite direction (the rest/middle direction stays untouched).
			// This is the register-friendly variant; the alternative 18-temporary
			// variant has been removed since this one is compiled unconditionally.
			float tmp;
			float *adj = backLattice[idx].adj;

			// left <-> right
			tmp = adj[DIR_RIGHT_FACE];
			adj[DIR_RIGHT_FACE] = adj[DIR_LEFT_FACE];
			adj[DIR_LEFT_FACE] = tmp;

			// top <-> bottom
			tmp = adj[DIR_TOP_FACE];
			adj[DIR_TOP_FACE] = adj[DIR_BOTTOM_FACE];
			adj[DIR_BOTTOM_FACE] = tmp;

			// front <-> back
			tmp = adj[DIR_BACK_FACE];
			adj[DIR_BACK_FACE] = adj[DIR_FRONT_FACE];
			adj[DIR_FRONT_FACE] = tmp;

			// frontLeft <-> backRight
			tmp = adj[DIR_FRONT_LEFT_EDGE];
			adj[DIR_FRONT_LEFT_EDGE] = adj[DIR_BACK_RIGHT_EDGE];
			adj[DIR_BACK_RIGHT_EDGE] = tmp;

			// frontRight <-> backLeft
			// FIX: the original assigned adj[DIR_BACK_LEFT_EDGE] = adj[DIR_FRONT_RIGHT_EDGE]
			// after DIR_FRONT_RIGHT_EDGE had already been overwritten, so DIR_BACK_LEFT_EDGE
			// kept its own old value and the pair was never swapped (the non-register-friendly
			// variant of this kernel performed a true swap, confirming the intent); use tmp.
			tmp = adj[DIR_FRONT_RIGHT_EDGE];
			adj[DIR_FRONT_RIGHT_EDGE] = adj[DIR_BACK_LEFT_EDGE];
			adj[DIR_BACK_LEFT_EDGE] = tmp;

			// bottomFront <-> topBack
			tmp = adj[DIR_BOTTOM_FRONT_EDGE];
			adj[DIR_BOTTOM_FRONT_EDGE] = adj[DIR_TOP_BACK_EDGE];
			adj[DIR_TOP_BACK_EDGE] = tmp;

			// bottomBack <-> topFront
			tmp = adj[DIR_BOTTOM_BACK_EDGE];
			adj[DIR_BOTTOM_BACK_EDGE] = adj[DIR_TOP_FRONT_EDGE];
			adj[DIR_TOP_FRONT_EDGE] = tmp;

			// topRight <-> bottomLeft
			tmp = adj[DIR_TOP_RIGHT_EDGE];
			adj[DIR_TOP_RIGHT_EDGE] = adj[DIR_BOTTOM_LEFT_EDGE];
			adj[DIR_BOTTOM_LEFT_EDGE] = tmp;

			// topLeft <-> bottomRight
			tmp = adj[DIR_TOP_LEFT_EDGE];
			adj[DIR_TOP_LEFT_EDGE] = adj[DIR_BOTTOM_RIGHT_EDGE];
			adj[DIR_BOTTOM_RIGHT_EDGE] = tmp;
		}
	}
}

//! Kernel that streams the microscopic particles from the previous frame.
/*!
Kernel that streams the microscopic particles from the previous frame.

	Each node pulls the distribution value of every direction from the neighbour that
	direction points away from (pull scheme), with periodic wrap-around at the lattice
	boundaries, and finally clamps all distributions to [0, 1].

	\param[in] backLatice		Lattice that will be used in the current frame (the one we are currently updating).
	\param[in] frontLattice		Lattice from the previous frame from which we stream the particles.
*/
__global__ void streamingStepKernel(Node3D *backLattice, Node3D *frontLattice) {

	int idx = threadIdx.x + blockDim.x * threadIdx.y;		// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	if (idx < d_latticeSize) {

		// decompose the flat index into lattice coordinates
		int x = idx % d_latticeWidth;
		int y = (idx / d_latticeWidth) % d_latticeHeight;
		int z = idx / (d_latticeHeight * d_latticeWidth);

		// NOTE(review): values are accumulated with += (not assigned); this appears to
		// assume backLattice's distributions were cleared/consumed beforehand - confirm
		// against the caller of this kernel.
		backLattice[idx].adj[DIR_MIDDLE_VERTEX] += frontLattice[idx].adj[DIR_MIDDLE_VERTEX];

		// neighbour coordinates in all six axial directions
		int right;
		int left;
		int top;
		int bottom;
		int front;
		int back;

		right = x + 1;
		left = x - 1;
		top = y + 1;
		bottom = y - 1;
		front = z + 1;
		back = z - 1;

		// periodic boundary conditions: wrap around instead of clamping
		// (the clamping alternative was present but commented out in the original)
		if (right > d_latticeWidth - 1) {
			right = 0;
		}
		if (left < 0) {
			left = d_latticeWidth - 1;
		}
		if (top > d_latticeHeight - 1) {
			top = 0;
		}
		if (bottom < 0) {
			bottom = d_latticeHeight - 1;
		}
		if (front > d_latticeDepth - 1) {
			front = 0;
		}
		if (back < 0) {
			back = d_latticeDepth - 1;
		}

		// (a dead commented-out non-accumulating variant of the statements below was removed)

		// pull each direction's value from the neighbour the direction originates from
		backLattice[idx].adj[DIR_LEFT_FACE] += frontLattice[getIdxKer(right, y, z)].adj[DIR_LEFT_FACE];
		backLattice[idx].adj[DIR_FRONT_FACE] += frontLattice[getIdxKer(x, y, back)].adj[DIR_FRONT_FACE];
		backLattice[idx].adj[DIR_BOTTOM_FACE] += frontLattice[getIdxKer(x, top, z)].adj[DIR_BOTTOM_FACE];
		backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] += frontLattice[getIdxKer(right, y, back)].adj[DIR_FRONT_LEFT_EDGE];
		backLattice[idx].adj[DIR_BACK_LEFT_EDGE] += frontLattice[getIdxKer(right, y, front)].adj[DIR_BACK_LEFT_EDGE];
		backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] += frontLattice[getIdxKer(right, top, z)].adj[DIR_BOTTOM_LEFT_EDGE];
		backLattice[idx].adj[DIR_TOP_LEFT_EDGE] += frontLattice[getIdxKer(right, bottom, z)].adj[DIR_TOP_LEFT_EDGE];
		backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] += frontLattice[getIdxKer(x, top, back)].adj[DIR_BOTTOM_FRONT_EDGE];
		backLattice[idx].adj[DIR_TOP_FRONT_EDGE] += frontLattice[getIdxKer(x, bottom, back)].adj[DIR_TOP_FRONT_EDGE];
		backLattice[idx].adj[DIR_RIGHT_FACE] += frontLattice[getIdxKer(left, y, z)].adj[DIR_RIGHT_FACE];
		backLattice[idx].adj[DIR_BACK_FACE] += frontLattice[getIdxKer(x, y, front)].adj[DIR_BACK_FACE];
		backLattice[idx].adj[DIR_TOP_FACE] += frontLattice[getIdxKer(x, bottom, z)].adj[DIR_TOP_FACE];
		backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] += frontLattice[getIdxKer(left, y, front)].adj[DIR_BACK_RIGHT_EDGE];
		backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] += frontLattice[getIdxKer(left, y, back)].adj[DIR_FRONT_RIGHT_EDGE];
		backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] += frontLattice[getIdxKer(left, bottom, z)].adj[DIR_TOP_RIGHT_EDGE];
		backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] += frontLattice[getIdxKer(left, top, z)].adj[DIR_BOTTOM_RIGHT_EDGE];
		backLattice[idx].adj[DIR_TOP_BACK_EDGE] += frontLattice[getIdxKer(x, bottom, front)].adj[DIR_TOP_BACK_EDGE];
		backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] += frontLattice[getIdxKer(x, top, front)].adj[DIR_BOTTOM_BACK_EDGE];

		// clamp all distribution values to [0, 1] for numerical stability
		for (int i = 0; i < 19; i++) {
			if (backLattice[idx].adj[i] < 0.0f) {
				backLattice[idx].adj[i] = 0.0f;
			} else if (backLattice[idx].adj[i] > 1.0f) {
				backLattice[idx].adj[i] = 1.0f;
			}
		}
	}
}

//! Initializes the front lattice with default distribution function values.
__global__ void initLatticeKernel(Node3D *frontLattice) {
	int idx = threadIdx.x + blockDim.x * threadIdx.y;		// idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	if (idx < d_latticeSize) {
		// D3Q19 rest-state weights: index 0 = middle, 1-6 = axial faces, 7-18 = edges
		frontLattice[idx].adj[DIR_MIDDLE_VERTEX] = WEIGHT_MIDDLE;
		for (int i = 1; i <= 6; i++) {
			frontLattice[idx].adj[i] = WEIGHT_AXIS;
		}
		for (int i = 7; i <= 18; i++) {
			frontLattice[idx].adj[i] = WEIGHT_NON_AXIAL;
		}
	}
}


LBM3D_1D_indices::LBM3D_1D_indices() {
}

//! Constructs the 3D LBM solver: reads lattice configuration from the variable manager,
//! allocates the device lattices/velocity buffers, uploads the simulation constants to
//! constant device memory and builds the visualization grids.
LBM3D_1D_indices::LBM3D_1D_indices(VariableManager *vars, ParticleSystem *particleSystem, STLPDiagram *stlpDiagram)
	: vars(vars), particleSystem(particleSystem), stlpDiagram(stlpDiagram) {
	// FIX: the original init list contained blockDim(blockDim), which self-initializes the
	// member from its own indeterminate value (there is no blockDim parameter); the member
	// is assigned below anyway, so the bogus initializer was removed.

	cout << "Creating LBM 3D..." << endl;

	position = vars->latticePosition;
	tau = vars->tau;
	sceneFilename = vars->sceneFilename;
	blockDim = dim3(vars->blockDim_3D_x, vars->blockDim_3D_y, 1);

	itau = 1.0f / tau;
	nu = (2.0f * tau - 1.0f) / 6.0f;	// lattice kinematic viscosity derived from tau

	heightMap = vars->heightMap;

	latticeWidth = vars->latticeWidth;
	latticeHeight = vars->latticeHeight;
	latticeDepth = vars->latticeDepth;
	scale = vars->latticeScale;

	latticeSize = latticeWidth * latticeHeight * latticeDepth;

	// per-column terrain heights used by the collider kernel
	CHECK_ERROR(hipMalloc((void**)&d_heightMap, sizeof(float) * latticeWidth * latticeDepth));
	refreshHeightMap();

	// double-buffered lattice plus the macroscopic velocity field
	CHECK_ERROR(hipMalloc((void**)&d_frontLattice, sizeof(Node3D) * latticeSize));
	CHECK_ERROR(hipMalloc((void**)&d_backLattice, sizeof(Node3D) * latticeSize));
	CHECK_ERROR(hipMalloc((void**)&d_velocities, sizeof(glm::vec3) * latticeSize));

	//hipGraphicsGLRegisterBuffer(&cudaParticleVerticesVBO, particleSystem->vbo, hipGraphicsMapFlagsWriteDiscard);
	//CHECK_ERROR(hipGraphicsGLRegisterBuffer(&cudaParticleColorsVBO, particleSystem->colorsVBO, hipGraphicsMapFlagsWriteDiscard));

	// upload simulation constants to __constant__ device memory
	CHECK_ERROR(hipMemcpyToSymbol(dirVectorsConst, &directionVectors3D[0], 19 * sizeof(glm::vec3)));
	CHECK_ERROR(hipMemcpyToSymbol(d_latticeWidth, &latticeWidth, sizeof(int)));
	CHECK_ERROR(hipMemcpyToSymbol(d_latticeHeight, &latticeHeight, sizeof(int)));
	CHECK_ERROR(hipMemcpyToSymbol(d_latticeDepth, &latticeDepth, sizeof(int)));
	CHECK_ERROR(hipMemcpyToSymbol(d_latticeSize, &latticeSize, sizeof(int)));
	CHECK_ERROR(hipMemcpyToSymbol(d_tau, &tau, sizeof(float)));
	CHECK_ERROR(hipMemcpyToSymbol(d_itau, &itau, sizeof(float)));
	CHECK_ERROR(hipMemcpyToSymbol(d_worldSizeRatio, &scale, sizeof(float)));
	CHECK_ERROR(hipMemcpyToSymbol(d_position, glm::value_ptr(position), sizeof(glm::vec3)));

	// NOTE(review): latticeSize / (blockDim.x * ...) is integer division, so the ceil() is a
	// no-op and the + 1 is what guarantees coverage (possibly over-allocating one block).
	gridDim = dim3((unsigned int)ceil(latticeSize / (blockDim.x * blockDim.y * blockDim.z)) + 1, 1, 1);
	// dynamic shared memory size for the shared-memory collision kernels
	cacheSize = blockDim.x * blockDim.y * blockDim.z * sizeof(Node3D);

	initBuffers();
	initLattice();

	if (vars->useSoundingWindVelocities) {
		// per-row inlet velocities derived from the sounding wind profile
		// NOTE(review): d_inletVelocities does not appear to be freed in the destructor - verify.
		CHECK_ERROR(hipMalloc((void**)&d_inletVelocities, sizeof(glm::vec3) * latticeHeight));
		vector<glm::vec3> windDeltas;
		stlpDiagram->getWindDeltasForLattice(latticeHeight, windDeltas);
		CHECK_ERROR(hipMemcpy(d_inletVelocities, windDeltas.data(), sizeof(glm::vec3) * latticeHeight, hipMemcpyHostToDevice));
	}

	grid = new GridLBM(this);
	editGrid = new GridLBM(this, glm::vec3(1.0f, 0.2f, 0.2f));

	CHECK_ERROR(hipPeekAtLastError());
}

//! Frees the device buffers and visualization grids.
LBM3D_1D_indices::~LBM3D_1D_indices() {

	CHECK_ERROR(hipFree(d_frontLattice));
	CHECK_ERROR(hipFree(d_backLattice));
	CHECK_ERROR(hipFree(d_velocities));

	//hipGraphicsUnregisterResource(cudaParticleVerticesVBO);
//hipGraphicsUnregisterResource(cudaParticleColorsVBO); if (grid) { delete grid; } if (editGrid) { delete editGrid; } } void LBM3D_1D_indices::recalculateVariables() { itau = 1.0f / tau; nu = (2.0f * tau - 1.0f) / 6.0f; CHECK_ERROR(hipMemcpyToSymbol(d_tau, &tau, sizeof(float))); CHECK_ERROR(hipMemcpyToSymbol(d_itau, &itau, sizeof(float))); } void LBM3D_1D_indices::refreshHeightMap() { float *tempHM = new float[latticeWidth * latticeDepth](); for (int z = 0; z < latticeDepth; z++) { for (int x = 0; x < latticeWidth; x++) { int xidx = vars->terrainXOffset + x; int zidx = vars->terrainZOffset + z; // TESTING xidx = (int)((xidx * scale) + position.x); zidx = (int)((zidx * scale) + position.z); xidx /= (int)heightMap->texelWorldSize; zidx /= (int)heightMap->texelWorldSize; if (xidx < heightMap->width && xidx >= 0 && zidx < heightMap->height && zidx >= 0) { //tempHM[x + z * latticeWidth] = heightMap->data[xidx][zidx]; tempHM[x + z * latticeWidth] = (heightMap->data[xidx + zidx * heightMap->width] - position.y) / scale; } //tempHM[x + z * latticeWidth] = heightMap->data[x][z]; } } CHECK_ERROR(hipMemcpy(d_heightMap, tempHM, sizeof(float) * latticeWidth * latticeDepth, hipMemcpyHostToDevice)); delete[] tempHM; } void LBM3D_1D_indices::startEditing() { editing = true; saveState(); } void LBM3D_1D_indices::stopEditing(bool saveChanges) { editing = false; if (saveChanges) { this->saveChanges(); } else { resetChanges(); } } void LBM3D_1D_indices::saveChanges() { CHECK_ERROR(hipMemcpyToSymbol(d_worldSizeRatio, &scale, sizeof(float))); CHECK_ERROR(hipMemcpyToSymbol(d_position, glm::value_ptr(position), sizeof(glm::vec3))); refreshHeightMap(); CHECK_ERROR(hipGetLastError()); } void LBM3D_1D_indices::resetChanges() { resetToPrevState(); } bool LBM3D_1D_indices::isUnderEdit() { return editing; } void LBM3D_1D_indices::draw() { grid->draw(); if (editing) { editGrid->draw(getPrevStateModelMatrix()); } } void LBM3D_1D_indices::draw(ShaderProgram & shader) { #ifdef DRAW_VELOCITY_ARROWS 
shader.setVec3("u_Color", glm::vec3(0.2f, 0.3f, 1.0f)); glBindVertexArray(velocityVAO); glBindBuffer(GL_ARRAY_BUFFER, velocityVBO); glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * velocityArrows.size(), &velocityArrows[0], GL_STATIC_DRAW); glDrawArrays(GL_LINES, 0, velocityArrows.size()); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS shader.setVec3("u_Color", glm::vec3(0.8f, 1.0f, 0.6f)); glBindVertexArray(particleArrowsVAO); glBindBuffer(GL_ARRAY_BUFFER, particleArrowsVBO); glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * particleArrows.size(), &particleArrows[0], GL_STATIC_DRAW); glDrawArrays(GL_LINES, 0, particleArrows.size()); #endif heightMap->draw(); } void LBM3D_1D_indices::doStep() { clearBackLattice(); updateInlets(); streamingStep(); updateColliders(); collisionStep(); moveParticles(); swapLattices(); } void LBM3D_1D_indices::doStepCUDA() { CHECK_ERROR(hipPeekAtLastError()); // ============================================= clear back lattice CUDA clearBackLatticeKernel << <gridDim, blockDim >> > (d_backLattice); //hipDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(hipPeekAtLastError()); // ============================================= update inlets CUDA //updateInletsKernel << <gridDim, blockDim >> > (d_backLattice, d_velocities, inletVelocity); updateInletsKernel << <gridDim, blockDim >> > (d_backLattice, d_velocities, inletVelocity, d_inletVelocities, xLeftInlet, xRightInlet, yBottomInlet, yTopInlet, zLeftInlet, zRightInlet); //hipDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(hipPeekAtLastError()); // ============================================= streaming step CUDA streamingStepKernel << <gridDim, blockDim >> > (d_backLattice, d_frontLattice); //hipDeviceSynchronize(); // FOR FINDING ERROR - TESTING! 
CHECK_ERROR(hipPeekAtLastError()); // ============================================= update colliders CUDA updateCollidersKernel << <gridDim, blockDim >> > (d_backLattice, d_velocities, d_heightMap); //hipDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(hipPeekAtLastError()); // ============================================= collision step CUDA //collisionStepKernel << <gridDim, blockDim >> > (d_backLattice, d_velocities, vars->useSubgridModel); //collisionStepKernelShared << <gridDim, blockDim, cacheSize >> > (d_backLattice, d_velocities, vars->useSubgridModel); if (vars->lbmUseExtendedCollisionStep) { collisionStepKernelSharedNewReorganizedExtended << <gridDim, blockDim, cacheSize >> > (d_backLattice, d_velocities, vars->useSubgridModel); } else { collisionStepKernelSharedNewReorganized << <gridDim, blockDim, cacheSize >> > (d_backLattice, d_velocities, vars->useSubgridModel); } //collisionStepKernelStreamlinedShared << <gridDim, blockDim, cacheSize >> > (d_backLattice, d_velocities); //hipDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(hipPeekAtLastError()); // ============================================= move particles CUDA - different respawn from CPU !!! glm::vec3 *d_particleVerticesVBO; CHECK_ERROR(hipGraphicsMapResources(1, &particleSystem->cudaParticleVerticesVBO, 0)); size_t num_bytes; CHECK_ERROR(hipGraphicsResourceGetMappedPointer((void **)&d_particleVerticesVBO, &num_bytes, particleSystem->cudaParticleVerticesVBO)); //printf("CUDA-LBM mapped VBO: May access %ld bytes\n", num_bytes); /* glm::vec3 *d_particleColorsVBO; hipGraphicsMapResources(1, &cudaParticleColorsVBO, 0); hipGraphicsResourceGetMappedPointer((void **)&d_particleColorsVBO, &num_bytes, cudaParticleColorsVBO); */ //hipDeviceSynchronize(); // FOR FINDING ERROR - TESTING! 
CHECK_ERROR(hipPeekAtLastError()); //moveParticlesKernelInterop << <gridDim, blockDim >> > (d_particleVerticesVBO, d_velocities, /*d_numParticles*/particleSystem->numActiveParticles, nullptr, respawnMode, outOfBoundsMode); //moveParticlesKernelInteropNew << <gridDim, blockDim >> > (d_particleVerticesVBO, d_velocities, /*d_numParticles*/particleSystem->numActiveParticles, nullptr, respawnMode, outOfBoundsMode, vars->lbmVelocityMultiplier, (bool)vars->lbmUseCorrectInterpolation); moveParticlesKernelInteropNew2 << <gridDim, blockDim >> > (d_particleVerticesVBO, d_velocities, /*d_numParticles*/particleSystem->numActiveParticles, nullptr, respawnMode, outOfBoundsMode, vars->lbmVelocityMultiplier, vars->lbmUseCorrectInterpolation != 0); //hipDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(hipPeekAtLastError()); CHECK_ERROR(hipGraphicsUnmapResources(1, &particleSystem->cudaParticleVerticesVBO, 0)); if (streamlineParticleSystem->active) { streamlineParticleSystem->frameCounter++; glm::vec3 *d_streamlinesVBO; CHECK_ERROR(hipGraphicsMapResources(1, &streamlineParticleSystem->cudaStreamlinesVBO, 0)); size_t num_bytes; CHECK_ERROR(hipGraphicsResourceGetMappedPointer((void **)&d_streamlinesVBO, &num_bytes, streamlineParticleSystem->cudaStreamlinesVBO)); CHECK_ERROR(hipPeekAtLastError()); moveStreamlineParticlesKernel << <gridDim, blockDim >> > (d_streamlinesVBO, d_velocities, streamlineParticleSystem->d_currActiveVertices, streamlineParticleSystem->maxStreamlineLength, streamlineParticleSystem->maxNumStreamlines, respawnMode, outOfBoundsMode, vars->lbmVelocityMultiplier, vars->lbmUseCorrectInterpolation != 0); CHECK_ERROR(hipPeekAtLastError()); hipGraphicsUnmapResources(1, &streamlineParticleSystem->cudaStreamlinesVBO, 0); CHECK_ERROR(hipPeekAtLastError()); streamlineParticleSystem->update(); if (streamlineParticleSystem->frameCounter >= streamlineParticleSystem->maxStreamlineLength) { // we should be finished with creating the streamlines 
streamlineParticleSystem->deactivate(); } } /* hipGraphicsUnmapResources(1, &cudaParticleColorsVBO, 0); */ //CHECK_ERROR(hipPeekAtLastError()); swapLattices(); //CHECK_ERROR(hipPeekAtLastError()); frameId++; //hipDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(hipPeekAtLastError()); } void LBM3D_1D_indices::clearBackLattice() { for (int i = 0; i < latticeSize; i++) { for (int j = 0; j < 19; j++) { backLattice[i].adj[j] = 0.0f; } } #ifdef DRAW_VELOCITY_ARROWS velocityArrows.clear(); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS particleArrows.clear(); #endif } void LBM3D_1D_indices::streamingStep() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { for (int z = 0; z < latticeDepth; z++) { int idx = getIdx(x, y, z); backLattice[idx].adj[DIR_MIDDLE_VERTEX] += frontLattice[idx].adj[DIR_MIDDLE_VERTEX]; int right; int left; int top; int bottom; int front; int back; right = x + 1; left = x - 1; top = y + 1; bottom = y - 1; front = z + 1; back = z - 1; if (right > latticeWidth - 1) { right = latticeWidth - 1; } if (left < 0) { left = 0; } if (top > latticeHeight - 1) { top = latticeHeight - 1; } if (bottom < 0) { bottom = 0; } if (front > latticeDepth - 1) { front = latticeDepth - 1; } if (back < 0) { back = 0; } backLattice[idx].adj[DIR_LEFT_FACE] += frontLattice[getIdx(right, y, z)].adj[DIR_LEFT_FACE]; backLattice[idx].adj[DIR_FRONT_FACE] += frontLattice[getIdx(x, y, back)].adj[DIR_FRONT_FACE]; backLattice[idx].adj[DIR_BOTTOM_FACE] += frontLattice[getIdx(x, top, z)].adj[DIR_BOTTOM_FACE]; backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] += frontLattice[getIdx(right, y, back)].adj[DIR_FRONT_LEFT_EDGE]; backLattice[idx].adj[DIR_BACK_LEFT_EDGE] += frontLattice[getIdx(right, y, front)].adj[DIR_BACK_LEFT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] += frontLattice[getIdx(right, top, z)].adj[DIR_BOTTOM_LEFT_EDGE]; backLattice[idx].adj[DIR_TOP_LEFT_EDGE] += frontLattice[getIdx(right, bottom, z)].adj[DIR_TOP_LEFT_EDGE]; 
backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] += frontLattice[getIdx(x, top, back)].adj[DIR_BOTTOM_FRONT_EDGE]; backLattice[idx].adj[DIR_TOP_FRONT_EDGE] += frontLattice[getIdx(x, bottom, back)].adj[DIR_TOP_FRONT_EDGE]; backLattice[idx].adj[DIR_RIGHT_FACE] += frontLattice[getIdx(left, y, z)].adj[DIR_RIGHT_FACE]; backLattice[idx].adj[DIR_BACK_FACE] += frontLattice[getIdx(x, y, front)].adj[DIR_BACK_FACE]; backLattice[idx].adj[DIR_TOP_FACE] += frontLattice[getIdx(x, bottom, z)].adj[DIR_TOP_FACE]; backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] += frontLattice[getIdx(left, y, front)].adj[DIR_BACK_RIGHT_EDGE]; backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] += frontLattice[getIdx(left, y, back)].adj[DIR_FRONT_RIGHT_EDGE]; backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] += frontLattice[getIdx(left, bottom, z)].adj[DIR_TOP_RIGHT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] += frontLattice[getIdx(left, top, z)].adj[DIR_BOTTOM_RIGHT_EDGE]; backLattice[idx].adj[DIR_TOP_BACK_EDGE] += frontLattice[getIdx(x, bottom, front)].adj[DIR_TOP_BACK_EDGE]; backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] += frontLattice[getIdx(x, top, front)].adj[DIR_BOTTOM_BACK_EDGE]; for (int i = 0; i < 19; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } } void LBM3D_1D_indices::collisionStep() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { for (int z = 0; z < latticeDepth; z++) { int idx = getIdx(x, y, z); float macroDensity = calculateMacroscopicDensity(x, y, z); glm::vec3 macroVelocity = calculateMacroscopicVelocity(x, y, z, macroDensity); velocities[idx] = macroVelocity; #ifdef DRAW_VELOCITY_ARROWS velocityArrows.push_back(glm::vec3(x, y, z)); velocityArrows.push_back(glm::vec3(x, y, z) + velocities[idx] * 2.0f); #endif float leftTermMiddle = WEIGHT_MIDDLE * macroDensity; float leftTermAxis = WEIGHT_AXIS * macroDensity; float leftTermNonaxial = WEIGHT_NON_AXIAL * 
macroDensity; float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); float dotProd = glm::dot(vRight, macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBackRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBackLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFrontRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); 
dotProd = glm::dot(vFrontLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); if (useSubgridModel) { // SUBGRID MODEL - EXPERIMENTAL - GIVES INCORRECT VALUES float tensor[3][3]; float 
diffs[19]; diffs[0] = (backLattice[idx].adj[DIR_MIDDLE_VERTEX] - middleEq); diffs[1] = (backLattice[idx].adj[DIR_RIGHT_FACE] - rightEq); diffs[2] = (backLattice[idx].adj[DIR_LEFT_FACE] - leftEq); diffs[3] = (backLattice[idx].adj[DIR_BACK_FACE] - backEq); diffs[4] = (backLattice[idx].adj[DIR_FRONT_FACE] - frontEq); diffs[5] = (backLattice[idx].adj[DIR_TOP_FACE] - topEq); diffs[6] = (backLattice[idx].adj[DIR_BOTTOM_FACE] - bottomEq); diffs[7] = (backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq); diffs[8] = (backLattice[idx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq); diffs[9] = (backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq); diffs[10] = (backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq); diffs[11] = (backLattice[idx].adj[DIR_TOP_BACK_EDGE] - topBackEq); diffs[12] = (backLattice[idx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq); diffs[13] = (backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq); diffs[14] = (backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq); diffs[15] = (backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq); diffs[16] = (backLattice[idx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq); diffs[17] = (backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq); diffs[18] = (backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq); float sum = 0.0f; for (int i = 0; i < 19; i++) { sum += diffs[i]; } for (int i = 0; i < 9; i++) { tensor[0][0] = 0.0f; } for (int i = 0; i < 19; i++) { tensor[0][0] += directionVectors3D[i].x * directionVectors3D[i].x * diffs[i]; tensor[0][1] += directionVectors3D[i].x * directionVectors3D[i].y * diffs[i]; tensor[0][2] += directionVectors3D[i].x * directionVectors3D[i].z * diffs[i]; tensor[1][0] += directionVectors3D[i].y * directionVectors3D[i].x * diffs[i]; tensor[1][1] += directionVectors3D[i].y * directionVectors3D[i].y * diffs[i]; tensor[1][2] += directionVectors3D[i].y * directionVectors3D[i].z * diffs[i]; tensor[2][0] += directionVectors3D[i].z * directionVectors3D[i].x * diffs[i]; tensor[2][1] += 
directionVectors3D[i].z * directionVectors3D[i].y * diffs[i]; tensor[2][2] += directionVectors3D[i].z * directionVectors3D[i].z * diffs[i]; } sum = 0.0f; for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { sum += tensor[i][j] * tensor[i][j]; } } float S = (-nu + sqrtf(nu * nu + 18.0f * SMAG_C * sqrtf(sum))) / (6.0f * SMAG_C * SMAG_C); tau = 3.0f * (nu + SMAG_C * SMAG_C * S) + 0.5f; itau = 1.0f / tau; } backLattice[idx].adj[DIR_MIDDLE_VERTEX] -= itau * (backLattice[idx].adj[DIR_MIDDLE_VERTEX] - middleEq); backLattice[idx].adj[DIR_RIGHT_FACE] -= itau * (backLattice[idx].adj[DIR_RIGHT_FACE] - rightEq); backLattice[idx].adj[DIR_LEFT_FACE] -= itau * (backLattice[idx].adj[DIR_LEFT_FACE] - leftEq); backLattice[idx].adj[DIR_BACK_FACE] -= itau * (backLattice[idx].adj[DIR_BACK_FACE] - backEq); backLattice[idx].adj[DIR_FRONT_FACE] -= itau * (backLattice[idx].adj[DIR_FRONT_FACE] - frontEq); backLattice[idx].adj[DIR_TOP_FACE] -= itau * (backLattice[idx].adj[DIR_TOP_FACE] - topEq); backLattice[idx].adj[DIR_BOTTOM_FACE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_FACE] - bottomEq); backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq); backLattice[idx].adj[DIR_BACK_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq); backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq); backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq); backLattice[idx].adj[DIR_TOP_BACK_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_BACK_EDGE] - topBackEq); backLattice[idx].adj[DIR_TOP_FRONT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq); backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq); backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq); 
backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq); backLattice[idx].adj[DIR_TOP_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq); backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq); backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq); for (int i = 0; i < 19; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } } void LBM3D_1D_indices::moveParticles() { glm::vec3 adjVelocities[8]; for (int i = 0; i < particleSystemLBM->numParticles; i++) { float x = particleVertices[i].x; float y = particleVertices[i].y; float z = particleVertices[i].z; int leftX = (int)x; int rightX = leftX + 1; int bottomY = (int)y; int topY = bottomY + 1; int backZ = (int)z; int frontZ = backZ + 1; adjVelocities[0] = velocities[getIdx(leftX, topY, backZ)]; adjVelocities[1] = velocities[getIdx(rightX, topY, backZ)]; adjVelocities[2] = velocities[getIdx(leftX, bottomY, backZ)]; adjVelocities[3] = velocities[getIdx(rightX, bottomY, backZ)]; adjVelocities[4] = velocities[getIdx(leftX, topY, frontZ)]; adjVelocities[5] = velocities[getIdx(rightX, topY, frontZ)]; adjVelocities[6] = velocities[getIdx(leftX, bottomY, frontZ)]; adjVelocities[7] = velocities[getIdx(rightX, bottomY, frontZ)]; float horizontalRatio = x - leftX; float verticalRatio = y - bottomY; float depthRatio = z - backZ; glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio); glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * 
(1.0f - horizontalRatio); glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio); glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio); glm::vec3 finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio); #ifdef DRAW_PARTICLE_VELOCITY_ARROWS particleArrows.push_back(particleVertices[i]); #endif particleVertices[i] += finalVelocity; #ifdef DRAW_PARTICLE_VELOCITY_ARROWS glm::vec3 tmp = particleVertices[i] + 10.0f * finalVelocity; particleArrows.push_back(tmp); #endif if (!respawnLinearly) { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1 || particleVertices[i].y <= 0.0f || particleVertices[i].y >= latticeHeight - 1 || particleVertices[i].z <= 0.0f || particleVertices[i].z >= latticeDepth - 1) { particleVertices[i].x = 0.0f; particleVertices[i].y = rand(i, (int)y) * (latticeHeight - 1); particleVertices[i].z = rand(i, (int)z) * (latticeDepth - 1); //particleVertices[i].y = std::rand() % (latticeHeight - 1); //particleVertices[i].z = std::rand() % (latticeDepth - 1); } } else { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1 || particleVertices[i].y <= 0.0f || particleVertices[i].y >= latticeHeight - 1 || particleVertices[i].z <= 0.0f || particleVertices[i].z >= latticeDepth - 1) { particleVertices[i] = glm::vec3(0.0f, respawnY, respawnZ++); if (respawnZ >= latticeDepth - 1) { respawnZ = 0; respawnY++; } if (respawnY >= latticeHeight - 1) { respawnY = 0; } } } } } void LBM3D_1D_indices::updateInlets() { float macroDensity = 1.0f; glm::vec3 macroVelocity = inletVelocity; float leftTermMiddle = WEIGHT_MIDDLE * macroDensity; float leftTermAxis = WEIGHT_AXIS * macroDensity; float leftTermNonaxial = WEIGHT_NON_AXIAL * macroDensity; float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + 
leftTermMiddle * (-thirdTerm); float dotProd = glm::dot(vRight, macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBackRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBackLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFrontRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFrontLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontLeftEq = leftTermNonaxial + 
leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); for (int z = 0; z < latticeDepth; z++) { for (int y = 0; y < latticeHeight; y++) { int idx = getIdx(0, y, z); backLattice[idx].adj[DIR_MIDDLE_VERTEX] = middleEq; backLattice[idx].adj[DIR_RIGHT_FACE] = rightEq; backLattice[idx].adj[DIR_LEFT_FACE] = leftEq; 
backLattice[idx].adj[DIR_BACK_FACE] = backEq; backLattice[idx].adj[DIR_FRONT_FACE] = frontEq; backLattice[idx].adj[DIR_TOP_FACE] = topEq; backLattice[idx].adj[DIR_BOTTOM_FACE] = bottomEq; backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] = backRightEq; backLattice[idx].adj[DIR_BACK_LEFT_EDGE] = backLeftEq; backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] = frontRightEq; backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] = frontLeftEq; backLattice[idx].adj[DIR_TOP_BACK_EDGE] = topBackEq; backLattice[idx].adj[DIR_TOP_FRONT_EDGE] = topFrontEq; backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] = bottomBackEq; backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] = bottomFrontEq; backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] = topRightEq; backLattice[idx].adj[DIR_TOP_LEFT_EDGE] = topLeftEq; backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] = bottomRightEq; backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] = bottomLeftEq; for (int i = 0; i < 19; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } void LBM3D_1D_indices::updateColliders() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { for (int z = 0; z < latticeDepth; z++) { int idx = getIdx(x, y, z); if ((heightMap->data[x + z * heightMap->width] >= y && heightMap->data[x + z * heightMap->width] > 0.01f) || y == 0) { float right = backLattice[idx].adj[DIR_RIGHT_FACE]; float left = backLattice[idx].adj[DIR_LEFT_FACE]; float back = backLattice[idx].adj[DIR_BACK_FACE]; float front = backLattice[idx].adj[DIR_FRONT_FACE]; float top = backLattice[idx].adj[DIR_TOP_FACE]; float bottom = backLattice[idx].adj[DIR_BOTTOM_FACE]; float backRight = backLattice[idx].adj[DIR_BACK_RIGHT_EDGE]; float backLeft = backLattice[idx].adj[DIR_BACK_LEFT_EDGE]; float frontRight = backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE]; float frontLeft = backLattice[idx].adj[DIR_FRONT_LEFT_EDGE]; float topBack = backLattice[idx].adj[DIR_TOP_BACK_EDGE]; float topFront = 
backLattice[idx].adj[DIR_TOP_FRONT_EDGE]; float bottomBack = backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE]; float bottomFront = backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE]; float topRight = backLattice[idx].adj[DIR_TOP_RIGHT_EDGE]; float topLeft = backLattice[idx].adj[DIR_TOP_LEFT_EDGE]; float bottomRight = backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE]; float bottomLeft = backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE]; backLattice[idx].adj[DIR_RIGHT_FACE] = left; backLattice[idx].adj[DIR_LEFT_FACE] = right; backLattice[idx].adj[DIR_BACK_FACE] = front; backLattice[idx].adj[DIR_FRONT_FACE] = back; backLattice[idx].adj[DIR_TOP_FACE] = bottom; backLattice[idx].adj[DIR_BOTTOM_FACE] = top; backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] = frontLeft; backLattice[idx].adj[DIR_BACK_LEFT_EDGE] = frontRight; backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] = backLeft; backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] = backRight; backLattice[idx].adj[DIR_TOP_BACK_EDGE] = bottomFront; backLattice[idx].adj[DIR_TOP_FRONT_EDGE] = bottomBack; backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] = topFront; backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] = topBack; backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] = bottomLeft; backLattice[idx].adj[DIR_TOP_LEFT_EDGE] = bottomRight; backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] = topLeft; backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] = topRight; } } } } } void LBM3D_1D_indices::resetSimulation() { cout << "Resetting simulation..." 
<< endl; //particleSystemLBM->initParticlePositions(latticeWidth, latticeHeight, latticeDepth, heightMap); /* for (int i = 0; i < latticeWidth * latticeHeight; i++) { for (int j = 0; j < 19; j++) { backLattice[i].adj[j] = 0.0f; } velocities[i] = glm::vec3(0.0f); } initLattice(); */ /* hipMemcpy(d_frontLattice, frontLattice, sizeof(Node3D) * latticeSize, hipMemcpyHostToDevice); hipMemcpy(d_backLattice, backLattice, sizeof(Node3D) * latticeSize, hipMemcpyHostToDevice); hipMemcpy(d_velocities, velocities, sizeof(glm::vec3) * latticeSize, hipMemcpyHostToDevice); */ initLattice(); } void LBM3D_1D_indices::synchronize() { hipDeviceSynchronize(); } float LBM3D_1D_indices::getWorldWidth() { return scale * latticeWidth; } float LBM3D_1D_indices::getWorldHeight() { return scale * latticeHeight; } float LBM3D_1D_indices::getWorldDepth() { return scale * latticeDepth; } void LBM3D_1D_indices::snapToGround() { if (!heightMap) { return; } float ww = getWorldWidth(); float wd = getWorldDepth(); float miny = heightMap->getHeight(position.x, position.z); miny = min(miny, heightMap->getHeight(position.x + ww, position.z)); miny = min(miny, heightMap->getHeight(position.x + ww, position.z + wd)); miny = min(miny, heightMap->getHeight(position.x, position.z + wd)); position.y = miny; } const char * LBM3D_1D_indices::getRespawnModeString(int mode) { switch (mode) { case eRespawnMode::CYCLE_ALL: return "Cycle All (x, y, z)"; case eRespawnMode::CYCLE_XZ: return "Cycle x and z"; case eRespawnMode::RANDOM_UNIFORM: return "Random (Uniform)"; default: return "None"; } } glm::mat4 LBM3D_1D_indices::getModelMatrix() { glm::mat4 model(1.0f); model = glm::translate(model, position); model = glm::scale(model, glm::vec3(scale)); return model; } glm::mat4 LBM3D_1D_indices::getPrevStateModelMatrix() { glm::mat4 model(1.0f); model = glm::translate(model, prevState.position); model = glm::scale(model, glm::vec3(prevState.scale)); return model; } void LBM3D_1D_indices::mapVBO(GLuint VBO) { //res = 
cudaParticleVerticesVBO; //cudaParticleColorsVBO = res; //cout << "CUDA mapping VBO" << endl; CHECK_ERROR(hipGraphicsGLRegisterBuffer(&cudaParticleVerticesVBO, VBO, hipGraphicsMapFlagsWriteDiscard)); // returns out of memory error (even though it shouldn't according to documentation } void LBM3D_1D_indices::initBuffers() { #ifdef DRAW_VELOCITY_ARROWS // Velocity arrows glGenVertexArrays(1, &velocityVAO); glBindVertexArray(velocityVAO); glGenBuffers(1, &velocityVBO); glBindBuffer(GL_ARRAY_BUFFER, velocityVBO); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); glBindVertexArray(0); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS // Particle arrows glGenVertexArrays(1, &particleArrowsVAO); glBindVertexArray(particleArrowsVAO); glGenBuffers(1, &particleArrowsVBO); glBindBuffer(GL_ARRAY_BUFFER, particleArrowsVBO); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); #endif glBindVertexArray(0); } void LBM3D_1D_indices::initLattice() { CHECK_ERROR(hipMemset(d_backLattice, 0, sizeof(Node3D) * latticeSize)); CHECK_ERROR(hipMemset(d_velocities, 0, sizeof(glm::vec3) * latticeSize)); initLatticeKernel << <gridDim, blockDim >> > (d_frontLattice); /* for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { for (int z = 0; z < latticeDepth; z++) { int idx = getIdx(x, y, z); frontLattice[idx].adj[DIR_MIDDLE_VERTEX] = WEIGHT_MIDDLE; for (int i = 1; i <= 6; i++) { frontLattice[idx].adj[i] = WEIGHT_AXIS; } for (int i = 7; i <= 18; i++) { frontLattice[idx].adj[i] = WEIGHT_NON_AXIAL; } } } } */ } void LBM3D_1D_indices::swapLattices() { // CPU Node3D *tmp = frontLattice; frontLattice = backLattice; backLattice = tmp; // GPU tmp = d_frontLattice; d_frontLattice = d_backLattice; d_backLattice = tmp; } float LBM3D_1D_indices::calculateMacroscopicDensity(int x, int y, int z) { float macroDensity = 0.0f; int idx = getIdx(x, y, z); for (int i = 0; i < 19; 
i++) { macroDensity += backLattice[idx].adj[i]; } return macroDensity; } glm::vec3 LBM3D_1D_indices::calculateMacroscopicVelocity(int x, int y, int z, float macroDensity) { glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f); int idx = getIdx(x, y, z); macroVelocity += vLeft * backLattice[idx].adj[DIR_LEFT_FACE]; macroVelocity += vFront * backLattice[idx].adj[DIR_FRONT_FACE]; macroVelocity += vBottom * backLattice[idx].adj[DIR_BOTTOM_FACE]; macroVelocity += vFrontLeft * backLattice[idx].adj[DIR_FRONT_LEFT_EDGE]; macroVelocity += vBackLeft * backLattice[idx].adj[DIR_BACK_LEFT_EDGE]; macroVelocity += vBottomLeft * backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE]; macroVelocity += vTopLeft * backLattice[idx].adj[DIR_TOP_LEFT_EDGE]; macroVelocity += vBottomFront * backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE]; macroVelocity += vTopFront * backLattice[idx].adj[DIR_TOP_FRONT_EDGE]; macroVelocity += vRight * backLattice[idx].adj[DIR_RIGHT_FACE]; macroVelocity += vBack * backLattice[idx].adj[DIR_BACK_FACE]; macroVelocity += vTop * backLattice[idx].adj[DIR_TOP_FACE]; macroVelocity += vBackRight * backLattice[idx].adj[DIR_BACK_RIGHT_EDGE]; macroVelocity += vFrontRight * backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE]; macroVelocity += vTopRight * backLattice[idx].adj[DIR_TOP_RIGHT_EDGE]; macroVelocity += vBottomRight * backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE]; macroVelocity += vTopBack * backLattice[idx].adj[DIR_TOP_BACK_EDGE]; macroVelocity += vBottomBack * backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE]; macroVelocity /= macroDensity; return macroVelocity; } void LBM3D_1D_indices::saveState() { prevState.position = position; prevState.scale = scale; } void LBM3D_1D_indices::resetToPrevState() { position = prevState.position; scale = prevState.scale; }
9ab4c362af3a7be584e2b22e121cab24aa5caf69.cu
#include "LBM3D_1D_indices.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <glm\gtx\norm.hpp> #include <glm\gtc\matrix_transform.hpp> #include <iostream> #include "CUDAUtils.cuh" #include "ParticleSystem.h" #include "StreamlineParticleSystem.h" __constant__ int d_latticeWidth; //!< Lattice width constant on the device __constant__ int d_latticeHeight; //!< Lattice height constant on the device __constant__ int d_latticeDepth; //!< Lattice depth constant on the device __constant__ int d_latticeSize; //!< Lattice size constant on the device (latticeWidth * latticeHeight * latticeDepth) __constant__ float d_tau; //!< Tau value on the device __constant__ float d_itau; //!< Inverse tau value (1.0f / tau) on the device __constant__ float d_worldSizeRatio; __constant__ glm::vec3 d_position; __device__ int d_respawnY = 0; //!< Respawn y coordinate on the device, not used (random respawn now used) __device__ int d_respawnZ = 0; //!< Respawn z coordinate on the device, not used (random respawn now used) __constant__ glm::vec3 dirVectorsConst[19]; //! Returns the flattened index using the device constants and provided coordinates. __device__ int getIdxKer(int x, int y, int z) { return (x + d_latticeWidth * (y + d_latticeHeight * z)); } //! Returns uniform random between 0.0 and 1.0. Provided from different student's work. __device__ __host__ float rand(int x, int y) { int n = x + y * 57; n = (n << 13) ^ n; return ((1.0f - ((n * (n * n * 15731 + 789221) + 1376312589) & 0x7fffffff) / 1073741824.0f) + 1.0f) * 0.5f; } ///// Maps the value to the viridis color map. //__device__ glm::vec3 mapToViridis3D(float val) { // val = glm::clamp(val, 0.0f, 1.0f); // int discreteVal = (int)(val * 255.0f); // return glm::vec3(viridis_cm[discreteVal][0], viridis_cm[discreteVal][1], viridis_cm[discreteVal][2]); //} //! Maps the world position vector to the lattice position vector. 
__device__ glm::vec3 getLatticePosition(glm::vec3 worldPosition) { // TODO - offsets (model matrix?), maybe even scaling (model matrix scale) worldPosition -= d_position; return (worldPosition / d_worldSizeRatio); } //! Maps the lattice position vector to world position vector. __device__ glm::vec3 getWorldPosition(glm::vec3 latticePosition) { latticePosition *= d_worldSizeRatio; return (latticePosition + d_position); } //! Moves the streamline particles. __global__ void moveStreamlineParticlesKernel(glm::vec3 *streamlineVertices, glm::vec3 *velocities, int *streamlineLengths, int maxStreamlineLength, int maxNumStreamlines, int respawnMode, int outOfBoundsMode, float velocityMultiplier = 1.0f, bool useCorrectInterpolation = true) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; glm::vec3 adjVelocities[8]; while (idx < maxNumStreamlines) { if (streamlineLengths[idx] >= maxStreamlineLength) { idx += blockDim.x * blockDim.y * gridDim.x; continue; } int off = idx * maxStreamlineLength + streamlineLengths[idx]; // buffer offset for current vertex of the streamline with idx glm::vec3 pos = getLatticePosition(streamlineVertices[off]); if (pos.x < 0.0f || pos.x > d_latticeWidth - 1 || pos.y < 0.0f || pos.y > d_latticeHeight - 1 || pos.z < 0.0f || pos.z > d_latticeDepth - 1) { //streamlineLengths[idx] = maxStreamlineLength; // so we do not try to draw it again // we actually want to remember the streamline lengths so we can then render the lines cleanly idx += blockDim.x * blockDim.y * gridDim.x; continue; } int leftX = (int)pos.x; int rightX = leftX + 1; if (rightX > d_latticeWidth - 1) { rightX = 0; } int bottomY = (int)pos.y; int topY = bottomY + 1; if (topY > d_latticeHeight - 1) { topY = 0; } int frontZ = (int)pos.z; int backZ = frontZ + 1; if (backZ > d_latticeDepth - 1) { backZ = 0; } adjVelocities[0] = velocities[getIdxKer(leftX, topY, frontZ)]; // 0: V010 adjVelocities[1] = velocities[getIdxKer(rightX, 
topY, frontZ)]; // 1: V110 adjVelocities[2] = velocities[getIdxKer(leftX, bottomY, frontZ)]; // 2: V000 adjVelocities[3] = velocities[getIdxKer(rightX, bottomY, frontZ)]; // 3: V100 adjVelocities[4] = velocities[getIdxKer(leftX, topY, backZ)]; // 4: V011 adjVelocities[5] = velocities[getIdxKer(rightX, topY, backZ)]; // 5: V111 adjVelocities[6] = velocities[getIdxKer(leftX, bottomY, backZ)]; // 6: V001 adjVelocities[7] = velocities[getIdxKer(rightX, bottomY, backZ)]; // 7: V101 float horizontalRatio = pos.x - (float)leftX; float verticalRatio = pos.y - (float)bottomY; float depthRatio = pos.z - (float)frontZ; glm::vec3 finalVelocity; if (useCorrectInterpolation) { finalVelocity = adjVelocities[2] * (1.0f - horizontalRatio) * (1.0f - verticalRatio) * (1.0f - depthRatio) + adjVelocities[3] * horizontalRatio * (1.0f - verticalRatio) * (1.0f - depthRatio) + adjVelocities[0] * (1.0f - horizontalRatio) * verticalRatio * (1.0f - depthRatio) + adjVelocities[6] * (1.0f - horizontalRatio) * (1.0f - verticalRatio) * depthRatio + adjVelocities[7] * horizontalRatio * (1.0f - verticalRatio) * depthRatio + adjVelocities[4] * (1.0f - horizontalRatio) * verticalRatio * depthRatio + adjVelocities[1] * horizontalRatio * verticalRatio * (1.0f - depthRatio) + adjVelocities[5] * horizontalRatio * verticalRatio * depthRatio; } else { glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio); glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * (1.0f - horizontalRatio); glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio); glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio); 
finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio); } finalVelocity *= velocityMultiplier; pos += finalVelocity; streamlineVertices[off + 1] = getWorldPosition(pos); streamlineLengths[idx]++; idx += blockDim.x * blockDim.y * gridDim.x; } } //! Kernel for moving particles that uses OpenGL interoperability. /*! If the particles venture beyond the simulation bounding volume, they are respawned. Out of bounds mode is not used (not implemented) yet. \param[in] particleVertices Vertices (positions stored in VBO) of particles to be updated/moved. \param[in] velocities Array of velocities that will act on the particles. \param[in] numActiveParticles Number of active particles that should be moved. \param[in] particleColors --- OLD --- VBO of particle colors. \param[in] respawnMode Determines how the particles are respawned. \param[in] outOfBoundsMode --- NOT IMPLEMENTED --- Determines how are particles that are out of bounds treated. \param[in] velocityMultiplier Artifical multiplier that determines how much the particles are moved. \param[in] useCorrectInterpolation Determines mode of trilinear interpolation. 
*/ __global__ void moveParticlesKernelInteropNew2(glm::vec3 *particleVertices, glm::vec3 *velocities, /*int *numParticles*/ int numActiveParticles, glm::vec3 *particleColors, int respawnMode, int outOfBoundsMode, float velocityMultiplier = 1.0f, bool useCorrectInterpolation = true) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; glm::vec3 adjVelocities[8]; while (idx < numActiveParticles) { //if (isnan(particleVertices[idx].x) || isnan(particleVertices[idx].y) || isnan(particleVertices[idx].z) || // isinf(particleVertices[idx].x) || isinf(particleVertices[idx].y) || isinf(particleVertices[idx].z)) { // particleVertices[idx] = glm::vec3(0.0f); // continue; //} glm::vec3 pos = getLatticePosition(particleVertices[idx]); if (isnan(pos.x) || isnan(pos.y) || isnan(pos.z) || isinf(pos.x) || isinf(pos.y) || isinf(pos.z)) { particleVertices[idx] = glm::vec3(0.0f); continue; } if (pos.x < 0.0f || pos.x > d_latticeWidth - 1 || pos.y < 0.0f || pos.y > d_latticeHeight - 1 || pos.z < 0.0f || pos.z > d_latticeDepth - 1) { if (respawnMode <= 1) { if (pos.x < 0.0f || pos.x > d_latticeWidth - 1) { pos.x = fmodf(pos.x + d_latticeWidth - 1, d_latticeWidth - 1); } if (respawnMode == 0) { if (pos.y < 0.0f || pos.y > d_latticeHeight - 1) { pos.y = fmodf(pos.y + d_latticeHeight - 1, d_latticeHeight - 1); } } else { if (pos.y < 0.0f) { pos.y = 0.0f; } if (pos.y > d_latticeHeight - 1) { // respawn pos.x = 0.0f; pos.y = rand(idx, pos.y) * (d_latticeHeight - 1); pos.z = rand(idx, pos.z) * (d_latticeDepth - 1); } } if (pos.z < 0.0f || pos.z > d_latticeDepth - 1) { pos.z = fmodf(pos.z + d_latticeDepth - 1, d_latticeDepth - 1); } } else { //pos.x = 0.0f; pos.x = fmodf(pos.x + d_latticeWidth - 1, d_latticeWidth - 1); pos.y = fmodf(pos.y + d_latticeHeight - 1, d_latticeHeight - 1); pos.z = rand(idx, pos.z) * (d_latticeDepth - 1); } } int leftX = (int)pos.x; if (leftX < 0) { leftX = d_latticeWidth - 1; } int rightX = leftX + 1; if 
(rightX > d_latticeWidth - 1) { rightX = 0; } int bottomY = (int)pos.y; if (bottomY < 0) { bottomY = d_latticeHeight - 1; } int topY = bottomY + 1; if (topY > d_latticeHeight - 1) { topY = 0; } int frontZ = (int)pos.z; if (frontZ < 0) { frontZ = d_latticeDepth - 1; } int backZ = frontZ + 1; if (backZ > d_latticeDepth - 1) { backZ = 0; } adjVelocities[0] = velocities[getIdxKer(leftX, topY, frontZ)]; // 0: V010 adjVelocities[1] = velocities[getIdxKer(rightX, topY, frontZ)]; // 1: V110 adjVelocities[2] = velocities[getIdxKer(leftX, bottomY, frontZ)]; // 2: V000 adjVelocities[3] = velocities[getIdxKer(rightX, bottomY, frontZ)]; // 3: V100 adjVelocities[4] = velocities[getIdxKer(leftX, topY, backZ)]; // 4: V011 adjVelocities[5] = velocities[getIdxKer(rightX, topY, backZ)]; // 5: V111 adjVelocities[6] = velocities[getIdxKer(leftX, bottomY, backZ)]; // 6: V001 adjVelocities[7] = velocities[getIdxKer(rightX, bottomY, backZ)]; // 7: V101 float horizontalRatio = pos.x - (float)leftX; float verticalRatio = pos.y - (float)bottomY; float depthRatio = pos.z - (float)frontZ; glm::vec3 finalVelocity; if (useCorrectInterpolation) { finalVelocity = adjVelocities[2] * (1.0f - horizontalRatio) * (1.0f - verticalRatio) * (1.0f - depthRatio) + adjVelocities[3] * horizontalRatio * (1.0f - verticalRatio) * (1.0f - depthRatio) + adjVelocities[0] * (1.0f - horizontalRatio) * verticalRatio * (1.0f - depthRatio) + adjVelocities[6] * (1.0f - horizontalRatio) * (1.0f - verticalRatio) * depthRatio + adjVelocities[7] * horizontalRatio * (1.0f - verticalRatio) * depthRatio + adjVelocities[4] * (1.0f - horizontalRatio) * verticalRatio * depthRatio + adjVelocities[1] * horizontalRatio * verticalRatio * (1.0f - depthRatio) + adjVelocities[5] * horizontalRatio * verticalRatio * depthRatio; /* glm::vec3 topBackVelocity = adjVelocities[1] * horizontalRatio + adjVelocities[0] * (1.0f - horizontalRatio); glm::vec3 bottomBackVelocity = adjVelocities[3] * horizontalRatio + adjVelocities[2] * (1.0f - 
horizontalRatio); glm::vec3 backVelocity = topBackVelocity * verticalRatio + bottomBackVelocity * (1.0f - verticalRatio); glm::vec3 topFrontVelocity = adjVelocities[5] * horizontalRatio + adjVelocities[4] * (1.0f - horizontalRatio); glm::vec3 bottomFrontVelocity = adjVelocities[7] * horizontalRatio + adjVelocities[6] * (1.0f - horizontalRatio); glm::vec3 frontVelocity = topFrontVelocity * verticalRatio + bottomFrontVelocity * (1.0f - verticalRatio); finalVelocity = frontVelocity * depthRatio + backVelocity * (1.0f - depthRatio); */ } else { glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio); glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * (1.0f - horizontalRatio); glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio); glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio); finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio); } finalVelocity *= velocityMultiplier; pos += finalVelocity; particleVertices[idx] = getWorldPosition(pos); idx += blockDim.x * blockDim.y * gridDim.x; //if (isnan(particleVertices[idx].x) || isnan(particleVertices[idx].y) || isnan(particleVertices[idx].z) || // isinf(particleVertices[idx].x) || isinf(particleVertices[idx].y) || isinf(particleVertices[idx].z)) { // //printf("oh no!"); // particleVertices[idx] = glm::vec3(0.0f); // continue; //} } } __global__ void moveParticlesKernelInteropNew(glm::vec3 *particleVertices, glm::vec3 *velocities, /*int *numParticles*/ int numActiveParticles, glm::vec3 *particleColors, int respawnMode, int outOfBoundsMode, float velocityMultiplier = 1.0f, bool 
useCorrectInterpolation = true) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; glm::vec3 adjVelocities[8]; while (idx < numActiveParticles) { glm::vec3 pos = particleVertices[idx]; if (pos.x < 0.0f || pos.x > d_latticeWidth - 2 || pos.y < 0.0f || pos.y > d_latticeHeight - 2 || pos.z < 0.0f || pos.z > d_latticeDepth - 2) { if (respawnMode == 0) { if (pos.x < 0.0f || pos.x > d_latticeWidth - 2) { //pos.x = (float)((__float2int_rd(pos.x) + d_latticeWidth - 2) % (d_latticeWidth - 2)); pos.x = fmodf(pos.x + d_latticeWidth - 2, d_latticeWidth - 2); } if (pos.y < 0.0f) { pos.y = 0.0f; } if (pos.y > d_latticeHeight - 2) { // respawn pos.x = 0.0f; pos.y = rand(idx, pos.y) * (d_latticeHeight - 2); pos.z = rand(idx, pos.z) * (d_latticeDepth - 2); } if (pos.z < 0.0f || pos.z > d_latticeDepth - 2) { //pos.z = (float)((__float2int_rd(pos.z) + d_latticeDepth - 2) % (d_latticeDepth - 2)); pos.z = fmodf(pos.z + d_latticeDepth - 2, d_latticeDepth - 2); } } else { //pos.x = 0.0f; pos.x = fmodf(pos.x + d_latticeWidth - 2, d_latticeWidth - 2); //pos.y = (float)((__float2int_rd(pos.y) + d_latticeHeight - 2) % (d_latticeHeight - 2)); pos.y = fmodf(pos.y + d_latticeHeight - 2, d_latticeHeight - 2); pos.z = rand(idx, pos.z) * (d_latticeDepth - 2); } } int leftX = (int)pos.x; int rightX = leftX + 1; int bottomY = (int)pos.y; int topY = bottomY + 1; int frontZ = (int)pos.z; int backZ = frontZ + 1; adjVelocities[0] = velocities[getIdxKer(leftX, topY, frontZ)]; adjVelocities[1] = velocities[getIdxKer(rightX, topY, frontZ)]; adjVelocities[2] = velocities[getIdxKer(leftX, bottomY, frontZ)]; adjVelocities[3] = velocities[getIdxKer(rightX, bottomY, frontZ)]; adjVelocities[4] = velocities[getIdxKer(leftX, topY, backZ)]; adjVelocities[5] = velocities[getIdxKer(rightX, topY, backZ)]; adjVelocities[6] = velocities[getIdxKer(leftX, bottomY, backZ)]; adjVelocities[7] = velocities[getIdxKer(rightX, bottomY, backZ)]; float horizontalRatio = 
pos.x - leftX; float verticalRatio = pos.y - bottomY; float depthRatio = pos.z - frontZ; glm::vec3 finalVelocity; if (useCorrectInterpolation) { glm::vec3 topBackVelocity = adjVelocities[1] * horizontalRatio + adjVelocities[0] * (1.0f - horizontalRatio); glm::vec3 bottomBackVelocity = adjVelocities[3] * horizontalRatio + adjVelocities[2] * (1.0f - horizontalRatio); glm::vec3 backVelocity = topBackVelocity * verticalRatio + bottomBackVelocity * (1.0f - verticalRatio); glm::vec3 topFrontVelocity = adjVelocities[5] * horizontalRatio + adjVelocities[4] * (1.0f - horizontalRatio); glm::vec3 bottomFrontVelocity = adjVelocities[7] * horizontalRatio + adjVelocities[6] * (1.0f - horizontalRatio); glm::vec3 frontVelocity = topFrontVelocity * verticalRatio + bottomFrontVelocity * (1.0f - verticalRatio); finalVelocity = frontVelocity * depthRatio + backVelocity * (1.0f - depthRatio); } else { glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio); glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * (1.0f - horizontalRatio); glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio); glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio); finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio); } finalVelocity *= velocityMultiplier; pos += finalVelocity; particleVertices[idx] = pos; idx += blockDim.x * blockDim.y * gridDim.x; } } //! Kernel for moving particles that uses OpenGL interoperability. /*! Kernel for moving particles that uses OpenGL interoperability for setting particle positions and colors. 
If the particles venture beyond the simulation bounding volume, they are randomly respawned. If we use side mirroring (cycling), particles that go beyond side walls (on the z axis) will be mirrored/cycled to the other side of the bounding volume. \param[in] particleVertices Vertices (positions stored in VBO) of particles to be updated/moved. \param[in] velocities Array of velocities that will act on the particles. \param[in] numParticles Number of particles. \param[in] particleColors VBO of particle colors. */ __global__ void moveParticlesKernelInterop(glm::vec3 *particleVertices, glm::vec3 *velocities, /*int *numParticles*/ int numActiveParticles, glm::vec3 *particleColors, int respawnMode, int outOfBoundsMode) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; glm::vec3 adjVelocities[8]; while (idx < numActiveParticles) { // SOLVES CRASHES WITH STLP if (particleVertices[idx].x < 0.0f || particleVertices[idx].x > d_latticeWidth - 1 || particleVertices[idx].y < 0.0f || particleVertices[idx].y > d_latticeHeight - 1 || particleVertices[idx].z < 0.0f || particleVertices[idx].z > d_latticeDepth - 1) { if (outOfBoundsMode == 0) { idx += blockDim.x * blockDim.y * gridDim.x; continue; // beware - while cycle goes through multiple particles! 
} particleVertices[idx].x = 0.0f; //particleVertices[idx].y = y; particleVertices[idx].y = rand(idx, particleVertices[idx].y) * (d_latticeHeight - 1); //particleVertices[idx].z = z; particleVertices[idx].z = rand(idx, particleVertices[idx].z) * (d_latticeDepth - 1); //particleVertices[idx].y = d_respawnY; //particleVertices[idx].z = d_respawnZ++; } float x = particleVertices[idx].x; float y = particleVertices[idx].y; float z = particleVertices[idx].z; int leftX = (int)x; int rightX = leftX + 1; int bottomY = (int)y; int topY = bottomY + 1; int frontZ = (int)z; int backZ = frontZ + 1; adjVelocities[0] = velocities[getIdxKer(leftX, topY, frontZ)]; adjVelocities[1] = velocities[getIdxKer(rightX, topY, frontZ)]; adjVelocities[2] = velocities[getIdxKer(leftX, bottomY, frontZ)]; adjVelocities[3] = velocities[getIdxKer(rightX, bottomY, frontZ)]; adjVelocities[4] = velocities[getIdxKer(leftX, topY, backZ)]; adjVelocities[5] = velocities[getIdxKer(rightX, topY, backZ)]; adjVelocities[6] = velocities[getIdxKer(leftX, bottomY, backZ)]; adjVelocities[7] = velocities[getIdxKer(rightX, bottomY, backZ)]; float horizontalRatio = x - leftX; float verticalRatio = y - bottomY; float depthRatio = z - frontZ; glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio); glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * (1.0f - horizontalRatio); glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio); glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio); glm::vec3 finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio); particleVertices[idx] += finalVelocity; 
if (particleVertices[idx].x < 0.0f || particleVertices[idx].x > d_latticeWidth - 1 || particleVertices[idx].y < 0.0f || particleVertices[idx].y > d_latticeHeight - 1 || particleVertices[idx].z < 0.0f || particleVertices[idx].z > d_latticeDepth - 1) { particleVertices[idx].x = 0.0f; //particleVertices[idx].y = y; //particleVertices[idx].y = rand(idx, y) * (d_latticeHeight - 1); //particleVertices[idx].z = z; if (respawnMode == 1) { particleVertices[idx].z = rand(idx, z) * (d_latticeDepth - 1); // comment this out if you want to respawn at same z } //particleVertices[idx].y = d_respawnY; //particleVertices[idx].z = d_respawnZ++; } idx += blockDim.x * blockDim.y * gridDim.x; } } //! Kernel for clearing the back lattice. /*! Kernel that clears the back lattice. \param[in] backLattice Pointer to the back lattice to be cleared. */ __global__ void clearBackLatticeKernel(Node3D *backLattice) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; //if (idx == 0) { // printf("d_latticeSize = %d\n", d_latticeSize); //} if (idx < d_latticeSize) { for (int i = 0; i < 19; i++) { backLattice[idx].adj[i] = 0.0f; } } } //! Kernel for updating the inlets. /*! Kernel for updating the inlets. Acts the same way as collision step but with predetermined velocity and density. The inlet is the left wall of the simulation bounding volume. \param[in] backLattice The back lattice where we update node values. \param[in] velocities Velocities array for the lattice. \param[in] inletVelocity Our desired inlet velocity. 
*/ __global__ void updateInletsKernel(Node3D *backLattice, glm::vec3 *velocities, glm::vec3 inletVelocity, glm::vec3 *inletVelocities = nullptr, int xLeftInlet = 1, int xRightInlet = 0, int yBottomInlet = 0, int yTopInlet = 0, int zLeftInlet = 0, int zRightInlet = 0) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; if (idx < d_latticeSize) { int x = idx % d_latticeWidth; int y = (idx / d_latticeWidth) % d_latticeHeight; int z = idx / (d_latticeHeight * d_latticeWidth); bool shouldBeSet = false; if (xLeftInlet && x == 0) { shouldBeSet = true; } if (xRightInlet && x == d_latticeWidth - 1) { shouldBeSet = true; } if (yBottomInlet && y == 0) { shouldBeSet = true; } if (yTopInlet && y == d_latticeHeight - 1) { shouldBeSet = true; } if (zLeftInlet && z == 0) { shouldBeSet = true; } if (zRightInlet && z == d_latticeDepth - 1) { shouldBeSet = true; } if (shouldBeSet) { //#define USE_SOUNDING_VELOCITIES #ifdef USE_SOUNDING_VELOCITIES inletVelocity = inletVelocities[y]; #endif float macroDensity = 1.0f; //glm::vec3 macroVelocity = inletVelocity; float leftTermMiddle = WEIGHT_MIDDLE * macroDensity; float leftTermAxis = WEIGHT_AXIS * macroDensity; float leftTermNonaxial = WEIGHT_NON_AXIAL * macroDensity; float macroVelocityDot = glm::dot(inletVelocity, inletVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); float dotProd = glm::dot(dirVectorsConst[DIR_RIGHT_FACE], inletVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_LEFT_FACE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_FRONT_FACE], inletVelocity); firstTerm = 3.0f * dotProd; 
secondTerm = 4.5f * dotProd * dotProd; float frontEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_BACK_FACE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_TOP_FACE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FACE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_BACK_RIGHT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_BACK_LEFT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_FRONT_RIGHT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_FRONT_LEFT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_TOP_BACK_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = 
glm::dot(dirVectorsConst[DIR_TOP_FRONT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_BACK_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FRONT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_TOP_RIGHT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_TOP_LEFT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_LEFT_EDGE], inletVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); backLattice[idx].adj[DIR_MIDDLE_VERTEX] = middleEq; backLattice[idx].adj[DIR_RIGHT_FACE] = rightEq; backLattice[idx].adj[DIR_LEFT_FACE] = leftEq; backLattice[idx].adj[DIR_BACK_FACE] = backEq; backLattice[idx].adj[DIR_FRONT_FACE] = frontEq; backLattice[idx].adj[DIR_TOP_FACE] = topEq; backLattice[idx].adj[DIR_BOTTOM_FACE] = bottomEq; 
backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] = backRightEq; backLattice[idx].adj[DIR_BACK_LEFT_EDGE] = backLeftEq; backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] = frontRightEq; backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] = frontLeftEq; backLattice[idx].adj[DIR_TOP_BACK_EDGE] = topBackEq; backLattice[idx].adj[DIR_TOP_FRONT_EDGE] = topFrontEq; backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] = bottomBackEq; backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] = bottomFrontEq; backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] = topRightEq; backLattice[idx].adj[DIR_TOP_LEFT_EDGE] = topLeftEq; backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] = bottomRightEq; backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] = bottomLeftEq; /*for (int i = 0; i < 19; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } }*/ } } } //! Kernel for calculating the collision operator. /*! Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator. \param[in] backLattice Back lattice in which we do our calculations. \param[in] velocities Velocities array for the lattice. 
*/
__global__ void collisionStepKernel(Node3D *backLattice, glm::vec3 *velocities, int useSubgridModel = 0) {

	// One thread per lattice node: flat global index from the 2D block layout.
	int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	if (idx < d_latticeSize) {

		// Direction table. Index k in this table matches the k-th
		// non-equilibrium component f[k] used by the subgrid model below,
		// exactly as in the original unrolled code:
		//   0 = middle, 1..6 = axis faces, 7..18 = non-axial edges.
		const int dirs[19] = {
			DIR_MIDDLE_VERTEX,
			DIR_RIGHT_FACE, DIR_LEFT_FACE, DIR_BACK_FACE, DIR_FRONT_FACE,
			DIR_TOP_FACE, DIR_BOTTOM_FACE,
			DIR_BACK_RIGHT_EDGE, DIR_BACK_LEFT_EDGE, DIR_FRONT_RIGHT_EDGE, DIR_FRONT_LEFT_EDGE,
			DIR_TOP_BACK_EDGE, DIR_TOP_FRONT_EDGE, DIR_BOTTOM_BACK_EDGE, DIR_BOTTOM_FRONT_EDGE,
			DIR_TOP_RIGHT_EDGE, DIR_TOP_LEFT_EDGE, DIR_BOTTOM_RIGHT_EDGE, DIR_BOTTOM_LEFT_EDGE
		};

		// Macroscopic density = zeroth moment of the distribution functions.
		float macroDensity = 0.0f;
		for (int i = 0; i < 19; i++) {
			macroDensity += backLattice[idx].adj[i];
		}

		// Macroscopic velocity = first moment. The middle direction is skipped
		// (the original unrolled code never accumulated it either).
		glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f);
		#pragma unroll
		for (int k = 1; k < 19; k++) {
			macroVelocity += dirVectorsConst[dirs[k]] * backLattice[idx].adj[dirs[k]];
		}
		macroVelocity /= macroDensity;
		velocities[idx] = macroVelocity;

		// Second-order BGK equilibrium for every direction:
		//   eq = w*rho * (1 + 3(e.u) + 4.5(e.u)^2 - 1.5(u.u))
		float eq[19];
		float macroVelocityDot = glm::dot(macroVelocity, macroVelocity);
		float thirdTerm = 1.5f * macroVelocityDot;
		#pragma unroll
		for (int k = 0; k < 19; k++) {
			float weight = (k == 0) ? WEIGHT_MIDDLE : ((k <= 6) ? WEIGHT_AXIS : WEIGHT_NON_AXIAL);
			float leftTerm = weight * macroDensity;
			if (k == 0) {
				// Middle direction has no directional terms.
				eq[0] = leftTerm + leftTerm * (-thirdTerm);
			} else {
				float dotProd = glm::dot(dirVectorsConst[dirs[k]], macroVelocity);
				float firstTerm = 3.0f * dotProd;
				float secondTerm = 4.5f * dotProd * dotProd;
				eq[k] = leftTerm + leftTerm * (firstTerm + secondTerm - thirdTerm);
			}
		}

		// Relaxation rate: plain BGK 1/tau, or an effective rate from the
		// Smagorinsky subgrid-scale model when requested.
		float itau = d_itau;
		if (useSubgridModel) {
			// Non-equilibrium parts feed the strain-rate tensor estimate.
			float f[19];
			#pragma unroll
			for (int k = 0; k < 19; k++) {
				f[k] = backLattice[idx].adj[dirs[k]] - eq[k];
			}

			float tmp = -1.0f / (2.0f * d_tau * macroDensity);
			float sxx = f[3] + f[4] + f[7] + f[8] + f[9] + f[10] + f[15] + f[16] + f[17] + f[18];
			float sxz = f[9] - f[8] - f[10] + f[7];
			// NOTE(review): unlike sxz/szy this off-diagonal sum carries no
			// sign alternation — verify against the lattice direction vectors.
			float sxy = f[15] + f[16] + f[17] + f[18];
			float szz = f[1] + f[2] + f[7] + f[8] + f[9] + f[10] + f[11] + f[12] + f[13] + f[14];
			float szy = f[12] + f[13] - f[14] - f[11];
			float syy = f[5] + f[6] + f[11] + f[12] + f[13] + f[14] + f[15] + f[16] + f[17] + f[18];
			sxx *= tmp;
			sxz *= tmp;
			sxy *= tmp;
			szz *= tmp;
			szy *= tmp;
			syy *= tmp;

			// |S| = sqrt(2 * S_ij S_ij); off-diagonal components count twice.
			// BUGFIX: the original wrapped the off-diagonal sum of squares in
			// an extra sqrtf(), which is dimensionally inconsistent with the
			// Smagorinsky model's strain-rate magnitude.
			float magS = sqrtf(2.0f * (sxx * sxx + syy * syy + szz * szz
				+ 2.0f * (sxy * sxy + sxz * sxz + szy * szy)));

			float nu = (2.0f * d_tau - 1.0f) / 6.0f; // molecular viscosity from tau
			itau = 1.0f / (3.0f * (nu + SMAG_C * SMAG_C * magS) + 0.5f); // effective 1/tau
		}

		// Relax every distribution function towards its equilibrium.
		#pragma unroll
		for (int k = 0; k < 19; k++) {
			backLattice[idx].adj[dirs[k]] -= itau * (backLattice[idx].adj[dirs[k]] - eq[k]);
		}
	}
}


//! Kernel for calculating the collision operator that uses the shared memory (in naive manner).
/*!
	Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator.
	\param[in] backLattice		Back lattice in which we do our calculations.
	\param[in] velocities		Velocities array for the lattice.
*/
__global__ void collisionStepKernelSharedNewReorganized(Node3D *backLattice, glm::vec3 *velocities, int useSubgridModel = 0) {

	// Flat global index of the lattice node handled by this thread.
	int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block
	idx += blockDim.x * blockDim.y * blockIdx.x;

	// One shared-memory node per thread; each thread only ever touches its
	// own slot, so no __syncthreads() is required anywhere in this kernel.
	extern __shared__ Node3D cache[];
	int cacheIdx = threadIdx.x + blockDim.x * threadIdx.y;

	if (idx < d_latticeSize) {
		cache[cacheIdx] = backLattice[idx];

		// Zeroth moment -> macroscopic density.
		float macroDensity = 0.0f;
		for (int i = 0; i < 19; i++) {
			macroDensity += cache[cacheIdx].adj[i];
		}

		// First moment -> macroscopic velocity. The middle direction is
		// omitted and the summation order matches the unrolled original
		// exactly, so floating-point results are unchanged.
		const int momentumOrder[18] = {
			DIR_LEFT_FACE, DIR_FRONT_FACE, DIR_BOTTOM_FACE,
			DIR_FRONT_LEFT_EDGE, DIR_BACK_LEFT_EDGE, DIR_BOTTOM_LEFT_EDGE, DIR_TOP_LEFT_EDGE,
			DIR_BOTTOM_FRONT_EDGE, DIR_TOP_FRONT_EDGE,
			DIR_RIGHT_FACE, DIR_BACK_FACE, DIR_TOP_FACE,
			DIR_BACK_RIGHT_EDGE, DIR_FRONT_RIGHT_EDGE, DIR_TOP_RIGHT_EDGE, DIR_BOTTOM_RIGHT_EDGE,
			DIR_TOP_BACK_EDGE, DIR_BOTTOM_BACK_EDGE
		};
		glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f);
		#pragma unroll
		for (int k = 0; k < 18; k++) {
			macroVelocity += dirVectorsConst[momentumOrder[k]] * cache[cacheIdx].adj[momentumOrder[k]];
		}
		macroVelocity /= macroDensity;
		velocities[idx] = macroVelocity;

		float macroVelocityDot = glm::dot(macroVelocity, macroVelocity);
		float thirdTerm = 1.5f * macroVelocityDot;

		// Middle direction: its equilibrium carries no directional terms.
		float middleLeftTerm = WEIGHT_MIDDLE * macroDensity;
		float middleEq = middleLeftTerm + middleLeftTerm * (-thirdTerm);
		cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] -= d_itau * (cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] - middleEq);

		// Remaining 18 directions: axis faces first (WEIGHT_AXIS), then the
		// non-axial edges (WEIGHT_NON_AXIAL), in the same order as the
		// unrolled original. Each direction is relaxed towards its
		// second-order BGK equilibrium immediately after computing it.
		const int relaxOrder[18] = {
			DIR_RIGHT_FACE, DIR_LEFT_FACE, DIR_FRONT_FACE, DIR_BACK_FACE, DIR_TOP_FACE, DIR_BOTTOM_FACE,
			DIR_BACK_RIGHT_EDGE, DIR_BACK_LEFT_EDGE, DIR_FRONT_RIGHT_EDGE, DIR_FRONT_LEFT_EDGE,
			DIR_TOP_BACK_EDGE, DIR_TOP_FRONT_EDGE, DIR_BOTTOM_BACK_EDGE, DIR_BOTTOM_FRONT_EDGE,
			DIR_TOP_RIGHT_EDGE, DIR_TOP_LEFT_EDGE, DIR_BOTTOM_RIGHT_EDGE, DIR_BOTTOM_LEFT_EDGE
		};
		#pragma unroll
		for (int k = 0; k < 18; k++) {
			int dir = relaxOrder[k];
			float leftTerm = ((k < 6) ? WEIGHT_AXIS : WEIGHT_NON_AXIAL) * macroDensity;
			float dotProd = glm::dot(dirVectorsConst[dir], macroVelocity);
			float firstTerm = 3.0f * dotProd;
			float secondTerm = 4.5f * dotProd * dotProd;
			float currEq = leftTerm + leftTerm * (firstTerm + secondTerm - thirdTerm);
			cache[cacheIdx].adj[dir] -= d_itau * (cache[cacheIdx].adj[dir] - currEq);
		}

		// Publish the relaxed node back to global memory.
		backLattice[idx] = cache[cacheIdx];
	}
}


//! Kernel for calculating the collision operator that uses the shared memory (in naive manner).
/*!
	Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator.
	\param[in] backLattice		Back lattice in which we do our calculations.
	\param[in] velocities		Velocities array for the lattice.
*/ __global__ void collisionStepKernelSharedNewReorganizedExtended(Node3D *backLattice, glm::vec3 *velocities, int useSubgridModel = 0) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; extern __shared__ Node3D cache[]; int cacheIdx = threadIdx.x + blockDim.x * threadIdx.y; if (idx < d_latticeSize) { cache[cacheIdx] = backLattice[idx]; //__syncthreads(); // not needed float macroDensity = 0.0f; for (int i = 0; i < 19; i++) { macroDensity += cache[cacheIdx].adj[i]; } glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f); //macroVelocity += vMiddle * backLattice[idx].adj[DIR_MIDDLE]; macroVelocity += dirVectorsConst[DIR_LEFT_FACE] * cache[cacheIdx].adj[DIR_LEFT_FACE]; macroVelocity += dirVectorsConst[DIR_FRONT_FACE] * cache[cacheIdx].adj[DIR_FRONT_FACE]; macroVelocity += dirVectorsConst[DIR_BOTTOM_FACE] * cache[cacheIdx].adj[DIR_BOTTOM_FACE]; macroVelocity += dirVectorsConst[DIR_FRONT_LEFT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE]; macroVelocity += dirVectorsConst[DIR_BACK_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE]; macroVelocity += dirVectorsConst[DIR_BOTTOM_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE]; macroVelocity += dirVectorsConst[DIR_TOP_LEFT_EDGE] * cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE]; macroVelocity += dirVectorsConst[DIR_BOTTOM_FRONT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE]; macroVelocity += dirVectorsConst[DIR_TOP_FRONT_EDGE] * cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE]; macroVelocity += dirVectorsConst[DIR_RIGHT_FACE] * cache[cacheIdx].adj[DIR_RIGHT_FACE]; macroVelocity += dirVectorsConst[DIR_BACK_FACE] * cache[cacheIdx].adj[DIR_BACK_FACE]; macroVelocity += dirVectorsConst[DIR_TOP_FACE] * cache[cacheIdx].adj[DIR_TOP_FACE]; macroVelocity += dirVectorsConst[DIR_BACK_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE]; macroVelocity += dirVectorsConst[DIR_FRONT_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE]; macroVelocity += 
dirVectorsConst[DIR_TOP_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE]; macroVelocity += dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE]; macroVelocity += dirVectorsConst[DIR_TOP_BACK_EDGE] * cache[cacheIdx].adj[DIR_TOP_BACK_EDGE]; macroVelocity += dirVectorsConst[DIR_BOTTOM_BACK_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE]; macroVelocity /= macroDensity; velocities[idx] = macroVelocity; float currLeftTerm = WEIGHT_MIDDLE * macroDensity; //float leftTermAxis = WEIGHT_AXIS * macroDensity; //float leftTermNonaxial = WEIGHT_NON_AXIAL * macroDensity; float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float currEq = currLeftTerm + currLeftTerm * (-thirdTerm); cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] -= d_itau * (cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] - currEq); currLeftTerm = WEIGHT_AXIS * macroDensity; float dotProd = glm::dot(dirVectorsConst[DIR_RIGHT_FACE], macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm); cache[cacheIdx].adj[DIR_RIGHT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_RIGHT_FACE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_LEFT_FACE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_LEFT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_LEFT_FACE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_FRONT_FACE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * 
(firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_FRONT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_FACE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_BACK_FACE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_BACK_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_FACE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_TOP_FACE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_TOP_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FACE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FACE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_BOTTOM_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FACE] - currEq); currLeftTerm = WEIGHT_NON_AXIAL * macroDensity; dotProd = glm::dot(dirVectorsConst[DIR_BACK_RIGHT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_BACK_LEFT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * 
dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_FRONT_RIGHT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_FRONT_LEFT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_TOP_BACK_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_TOP_FRONT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_BACK_EDGE], macroVelocity); 
firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FRONT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_TOP_RIGHT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_TOP_LEFT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] - currEq); dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE], macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot); currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm); cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] -= d_itau * 
(cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] - currEq);

        /* Tail of the preceding collision kernel (its definition starts above this view):
           relax the last lattice direction and write the node back to global memory.
           NOTE(review): the fourthTerm expression (81 * (e.u)^2 in the cubic term) does not match
           the standard third-order BGK expansion used by the other kernels in this file — confirm
           against the reference derivation. */
        dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_LEFT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        fourthTerm = dotProd * ((9.0f * dotProd) * (9.0f * dotProd) - 3 * macroVelocityDot);
        currEq = currLeftTerm + currLeftTerm * (firstTerm + secondTerm - thirdTerm + fourthTerm);
        cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] - currEq);

        backLattice[idx] = cache[cacheIdx];
    }
}

//! Kernel for calculating the collision operator that uses the shared memory (in naive manner).
/*!
    Kernel that calculates the collision operator using the Bhatnagar-Gross-Krook (BGK) operator.
    Each thread caches its node in dynamic shared memory, computes macroscopic density/velocity,
    builds the 19 equilibrium distributions and relaxes towards them.
    \param[in] backLattice      Back lattice in which we do our calculations.
    \param[in] velocities       Velocities array for the lattice (macroscopic velocity written per node).
    \param[in] useSubgridModel  When non-zero, a Smagorinsky-style subgrid model replaces the global
                                relaxation rate d_itau with a per-node rate.
*/
__global__ void collisionStepKernelShared(Node3D *backLattice, glm::vec3 *velocities, int useSubgridModel = 0) {

    // flat 1D lattice index of this thread (block-local index + block offset)
    int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block
    idx += blockDim.x * blockDim.y * blockIdx.x;

    // one Node3D per thread in dynamic shared memory; size is supplied at launch
    extern __shared__ Node3D cache[];
    int cacheIdx = threadIdx.x + blockDim.x * threadIdx.y;

    if (idx < d_latticeSize) {

        cache[cacheIdx] = backLattice[idx];
        //__syncthreads(); // not needed

        // macroscopic density = sum of all 19 distribution functions
        float macroDensity = 0.0f;
        for (int i = 0; i < 19; i++) {
            macroDensity += cache[cacheIdx].adj[i];
        }

        // macroscopic velocity = (sum of e_i * f_i) / density; the middle direction has a zero
        // direction vector and is therefore omitted from the sum
        glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f);
        //macroVelocity += vMiddle * backLattice[idx].adj[DIR_MIDDLE];
        macroVelocity += dirVectorsConst[DIR_LEFT_FACE] * cache[cacheIdx].adj[DIR_LEFT_FACE];
        macroVelocity += dirVectorsConst[DIR_FRONT_FACE] * cache[cacheIdx].adj[DIR_FRONT_FACE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_FACE] * cache[cacheIdx].adj[DIR_BOTTOM_FACE];
        macroVelocity += dirVectorsConst[DIR_FRONT_LEFT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE];
        macroVelocity += dirVectorsConst[DIR_BACK_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE];
        macroVelocity += dirVectorsConst[DIR_TOP_LEFT_EDGE] * cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_FRONT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE];
        macroVelocity += dirVectorsConst[DIR_TOP_FRONT_EDGE] * cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE];
        macroVelocity += dirVectorsConst[DIR_RIGHT_FACE] * cache[cacheIdx].adj[DIR_RIGHT_FACE];
        macroVelocity += dirVectorsConst[DIR_BACK_FACE] * cache[cacheIdx].adj[DIR_BACK_FACE];
        macroVelocity += dirVectorsConst[DIR_TOP_FACE] * cache[cacheIdx].adj[DIR_TOP_FACE];
        macroVelocity += dirVectorsConst[DIR_BACK_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE];
        macroVelocity += dirVectorsConst[DIR_FRONT_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE];
        macroVelocity += dirVectorsConst[DIR_TOP_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE];
        macroVelocity += dirVectorsConst[DIR_TOP_BACK_EDGE] * cache[cacheIdx].adj[DIR_TOP_BACK_EDGE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_BACK_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE];
        macroVelocity /= macroDensity;

        velocities[idx] = macroVelocity;

        // BGK equilibrium: f_eq_i = w_i * rho * (1 + 3(e_i.u) + 4.5(e_i.u)^2 - 1.5(u.u))
        float leftTermMiddle = WEIGHT_MIDDLE * macroDensity;
        float leftTermAxis = WEIGHT_AXIS * macroDensity;
        float leftTermNonaxial = WEIGHT_NON_AXIAL * macroDensity;

        float macroVelocityDot = glm::dot(macroVelocity, macroVelocity);
        float thirdTerm = 1.5f * macroVelocityDot;

        float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm);

        float dotProd = glm::dot(dirVectorsConst[DIR_RIGHT_FACE], macroVelocity);
        float firstTerm = 3.0f * dotProd;
        float secondTerm = 4.5f * dotProd * dotProd;
        float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_LEFT_FACE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_FRONT_FACE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float frontEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_BACK_FACE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float backEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_TOP_FACE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FACE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_BACK_RIGHT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float backRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_BACK_LEFT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float backLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_FRONT_RIGHT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float frontRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_FRONT_LEFT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float frontLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_TOP_BACK_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float topBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_TOP_FRONT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float topFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_BACK_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float bottomBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_FRONT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float bottomFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_TOP_RIGHT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float topRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_TOP_LEFT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float topLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float bottomRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        dotProd = glm::dot(dirVectorsConst[DIR_BOTTOM_LEFT_EDGE], macroVelocity);
        firstTerm = 3.0f * dotProd;
        secondTerm = 4.5f * dotProd * dotProd;
        float bottomLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm);

        if (useSubgridModel) {
            // non-equilibrium parts f_i - f_eq_i, used to estimate the local strain-rate tensor
            float f[19];
            f[0] = (cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] - middleEq);
            f[1] = (cache[cacheIdx].adj[DIR_RIGHT_FACE] - rightEq);
            f[2] = (cache[cacheIdx].adj[DIR_LEFT_FACE] - leftEq);
            f[3] = (cache[cacheIdx].adj[DIR_BACK_FACE] - backEq);
            f[4] = (cache[cacheIdx].adj[DIR_FRONT_FACE] - frontEq);
            f[5] = (cache[cacheIdx].adj[DIR_TOP_FACE] - topEq);
            f[6] = (cache[cacheIdx].adj[DIR_BOTTOM_FACE] - bottomEq);
            f[7] = (cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq);
            f[8] = (cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq);
            f[9] = (cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq);
            f[10] = (cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq);
            f[11] = (cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] - topBackEq);
            f[12] = (cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq);
            f[13] = (cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq);
            f[14] = (cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq);
            f[15] = (cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq);
            f[16] = (cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq);
            f[17] = (cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq);
            f[18] = (cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq);

            // strain-rate tensor components accumulated from the non-equilibrium moments
            float tmp = -1.0f / (2.0f * d_tau);
            float sxx = f[3] + f[4] + f[7] + f[8] + f[9] + f[10] + f[15] + f[16] + f[17] + f[18];
            float sxz = f[9] - f[8] - f[10] + f[7];
            float sxy = f[15] + f[16] + f[17] + f[18];
            float szz = f[1] + f[2] + f[7] + f[8] + f[9] + f[10] + f[11] + f[12] + f[13] + f[14];
            float szy = f[12] + f[13] - f[14] - f[11];
            float syy = f[5] + f[6] + f[11] + f[12] + f[13] + f[14] + f[15] + f[16] + f[17] + f[18];
            sxx *= tmp;
            sxz *= tmp;
            sxy *= tmp;
            szz *= tmp;
            szy *= tmp;
            syy *= tmp;

            // strain-rate magnitude; NOTE(review): the inner sqrtf over the off-diagonal terms
            // differs from the usual |S| = sqrt(2 * S_ij S_ij) contraction — verify intent
            float magS = sqrtf(2.0f * (sxx * sxx + syy * syy + szz * szz + 2.0f * sqrtf(sxy * sxy + sxz * sxz + szy * szy)));
            // molecular viscosity from the global relaxation time, then an eddy-viscosity
            // corrected per-node relaxation rate (Smagorinsky constant SMAG_C)
            float nu = (2.0f * d_tau - 1.0f) / 6.0f;
            float itau_new = 1.0f / (3.0f * (nu + SMAG_C * SMAG_C * magS) + 0.5f);

            // relax every direction towards equilibrium with the per-node rate
            cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] -= itau_new * (cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] - middleEq);
            cache[cacheIdx].adj[DIR_RIGHT_FACE] -= itau_new * (cache[cacheIdx].adj[DIR_RIGHT_FACE] - rightEq);
            cache[cacheIdx].adj[DIR_LEFT_FACE] -= itau_new * (cache[cacheIdx].adj[DIR_LEFT_FACE] - leftEq);
            cache[cacheIdx].adj[DIR_BACK_FACE] -= itau_new * (cache[cacheIdx].adj[DIR_BACK_FACE] - backEq);
            cache[cacheIdx].adj[DIR_FRONT_FACE] -= itau_new * (cache[cacheIdx].adj[DIR_FRONT_FACE] - frontEq);
            cache[cacheIdx].adj[DIR_TOP_FACE] -= itau_new * (cache[cacheIdx].adj[DIR_TOP_FACE] - topEq);
            cache[cacheIdx].adj[DIR_BOTTOM_FACE] -= itau_new * (cache[cacheIdx].adj[DIR_BOTTOM_FACE] - bottomEq);
            cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq);
            cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq);
            cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq);
            cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq);
            cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] - topBackEq);
            cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq);
            cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq);
            cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq);
            cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq);
            cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq);
            cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq);
            cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] -= itau_new * (cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq);
        } else {
            // plain BGK relaxation with the global rate d_itau = 1/tau
            cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] -= d_itau * (cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] - middleEq);
            cache[cacheIdx].adj[DIR_RIGHT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_RIGHT_FACE] - rightEq);
            cache[cacheIdx].adj[DIR_LEFT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_LEFT_FACE] - leftEq);
            cache[cacheIdx].adj[DIR_BACK_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_FACE] - backEq);
            cache[cacheIdx].adj[DIR_FRONT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_FACE] - frontEq);
            cache[cacheIdx].adj[DIR_TOP_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FACE] - topEq);
            cache[cacheIdx].adj[DIR_BOTTOM_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FACE] - bottomEq);
            cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq);
            cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq);
            cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq);
            cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq);
            cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] - topBackEq);
            cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq);
            cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq);
            cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq);
            cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq);
            cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq);
            cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq);
            cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq);
        }

        /*for (int i = 0; i < 19; i++) {
            if (cache[cacheIdx].adj[i] < 0.0f) {
                cache[cacheIdx].adj[i] = 0.0f;
            } else if (cache[cacheIdx].adj[i] > 1.0f) {
                cache[cacheIdx].adj[i] = 1.0f;
            }
        }*/

        backLattice[idx] = cache[cacheIdx];
    }
}

//! Kernel for calculating the collision operator using shared memory with smaller register usage.
/*!
    Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator.
    Uses shared memory and less registers: equilibria are expanded directly in terms of the
    velocity components instead of per-direction dot products. Slower than its naive version
    unfortunately.
    \param[in] backLattice      Back lattice in which we do our calculations.
    \param[in] velocities       Velocities array for the lattice.
*/
__global__ void collisionStepKernelStreamlinedShared(Node3D *backLattice, glm::vec3 *velocities) {

    // flat 1D lattice index of this thread (block-local index + block offset)
    int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block
    idx += blockDim.x * blockDim.y * blockIdx.x;

    // one Node3D per thread in dynamic shared memory; size is supplied at launch
    extern __shared__ Node3D cache[];
    int cacheIdx = threadIdx.x + blockDim.x * threadIdx.y;

    if (idx < d_latticeSize) {

        cache[cacheIdx] = backLattice[idx];

        // macroscopic density = sum of all 19 distribution functions
        float macroDensity = 0.0f;
        for (int i = 0; i < 19; i++) {
            macroDensity += cache[cacheIdx].adj[i];
        }

        // macroscopic velocity = (sum of e_i * f_i) / density (middle direction omitted, zero vector)
        glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f);
        macroVelocity += dirVectorsConst[DIR_LEFT_FACE] * cache[cacheIdx].adj[DIR_LEFT_FACE];
        macroVelocity += dirVectorsConst[DIR_FRONT_FACE] * cache[cacheIdx].adj[DIR_FRONT_FACE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_FACE] * cache[cacheIdx].adj[DIR_BOTTOM_FACE];
        macroVelocity += dirVectorsConst[DIR_FRONT_LEFT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE];
        macroVelocity += dirVectorsConst[DIR_BACK_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_LEFT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE];
        macroVelocity += dirVectorsConst[DIR_TOP_LEFT_EDGE] * cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_FRONT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE];
        macroVelocity += dirVectorsConst[DIR_TOP_FRONT_EDGE] * cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE];
        macroVelocity += dirVectorsConst[DIR_RIGHT_FACE] * cache[cacheIdx].adj[DIR_RIGHT_FACE];
        macroVelocity += dirVectorsConst[DIR_BACK_FACE] * cache[cacheIdx].adj[DIR_BACK_FACE];
        macroVelocity += dirVectorsConst[DIR_TOP_FACE] * cache[cacheIdx].adj[DIR_TOP_FACE];
        macroVelocity += dirVectorsConst[DIR_BACK_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE];
        macroVelocity += dirVectorsConst[DIR_FRONT_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE];
        macroVelocity += dirVectorsConst[DIR_TOP_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_RIGHT_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE];
        macroVelocity += dirVectorsConst[DIR_TOP_BACK_EDGE] * cache[cacheIdx].adj[DIR_TOP_BACK_EDGE];
        macroVelocity += dirVectorsConst[DIR_BOTTOM_BACK_EDGE] * cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE];
        macroVelocity /= macroDensity;

        velocities[idx] = macroVelocity;

        float leftTermMiddle = WEIGHT_MIDDLE * macroDensity;
        float leftTermAxis = WEIGHT_AXIS * macroDensity;
        float leftTermNonaxial = WEIGHT_NON_AXIAL * macroDensity;

        float thirdTerm = 1.5f * glm::dot(macroVelocity, macroVelocity);

        float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm);

        // axis equilibria: e_i.u reduces to a single (signed) velocity component
        float tmp;
        float rightEq = leftTermAxis * (1.0f + 3.0f * macroVelocity.x + 4.5f * macroVelocity.x * macroVelocity.x - thirdTerm);
        float leftEq = leftTermAxis * (1.0f - 3.0f * macroVelocity.x + 4.5f * macroVelocity.x * macroVelocity.x - thirdTerm);
        float frontEq = leftTermAxis * (1.0f + 3.0f * macroVelocity.z + 4.5f * macroVelocity.z * macroVelocity.z - thirdTerm);
        float backEq = leftTermAxis * (1.0f - 3.0f * macroVelocity.z + 4.5f * macroVelocity.z * macroVelocity.z - thirdTerm);
        float topEq = leftTermAxis * (1.0f + 3.0f * macroVelocity.y + 4.5f * macroVelocity.y * macroVelocity.y - thirdTerm);
        float bottomEq = leftTermAxis * (1.0f - 3.0f * macroVelocity.y + 4.5f * macroVelocity.y * macroVelocity.y - thirdTerm);

        // edge equilibria: tmp holds the signed sum of two velocity components (e_i.u)
        tmp = macroVelocity.x - macroVelocity.z;
        float backRightEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = macroVelocity.x + macroVelocity.z;
        float backLeftEq = leftTermNonaxial * (1.0f - 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        // tmp is intentionally reused: frontRight direction dot product is also (u.x + u.z)
        float frontRightEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = -macroVelocity.x + macroVelocity.z;
        float frontLeftEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = macroVelocity.y - macroVelocity.z;
        float topBackEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = macroVelocity.y + macroVelocity.z;
        float topFrontEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = -macroVelocity.y - macroVelocity.z;
        float bottomBackEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = -macroVelocity.y + macroVelocity.z;
        float bottomFrontEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = macroVelocity.x + macroVelocity.y;
        float topRightEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = -macroVelocity.x + macroVelocity.y;
        float topLeftEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = macroVelocity.x - macroVelocity.y;
        float bottomRightEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);
        tmp = -macroVelocity.x - macroVelocity.y;
        float bottomLeftEq = leftTermNonaxial * (1.0f + 3.0f * tmp + 4.5f * tmp * tmp - thirdTerm);

        // plain BGK relaxation with the global rate d_itau = 1/tau
        cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] -= d_itau * (cache[cacheIdx].adj[DIR_MIDDLE_VERTEX] - middleEq);
        cache[cacheIdx].adj[DIR_RIGHT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_RIGHT_FACE] - rightEq);
        cache[cacheIdx].adj[DIR_LEFT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_LEFT_FACE] - leftEq);
        cache[cacheIdx].adj[DIR_BACK_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_FACE] - backEq);
        cache[cacheIdx].adj[DIR_FRONT_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_FACE] - frontEq);
        cache[cacheIdx].adj[DIR_TOP_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FACE] - topEq);
        cache[cacheIdx].adj[DIR_BOTTOM_FACE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FACE] - bottomEq);
        cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq);
        cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq);
        cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq);
        cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq);
        cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_BACK_EDGE] - topBackEq);
        cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq);
        cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq);
        cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq);
        cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq);
        cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq);
        cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq);
        cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq);

        //for (int i = 0; i < 19; i++) {
        //	if (cache[cacheIdx].adj[i] < 0.0f) {
        //		cache[cacheIdx].adj[i] = 0.0f;
        //	} else if (cache[cacheIdx].adj[i] > 1.0f) {
        //		cache[cacheIdx].adj[i] = 1.0f;
        //	}
        //}

        backLattice[idx] = cache[cacheIdx];
    }
}

//! Kernel for updating colliders/obstacles in the lattice.
/*!
    Updates colliders/obstacles by using the full bounce back approach.
    \param[in] backLattice      Back lattice in which we do our calculations.
    \param[in] velocities       Velocities array for the lattice.
    \param[in] heightMap        Height map of the scene.
*/
__global__ void updateCollidersKernel(Node3D *backLattice, glm::vec3 *velocities, float *heightMap) {

    // flat 1D lattice index of this thread (block-local index + block offset)
    int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block
    idx += blockDim.x * blockDim.y * blockIdx.x;

    if (idx < d_latticeSize) {

        // decompose the flat index into lattice coordinates
        int x = idx % d_latticeWidth;
        int y = (idx / d_latticeWidth) % d_latticeHeight;
        int z = idx / (d_latticeHeight * d_latticeWidth);

        // a node is an obstacle when it lies at/below the terrain height map, or on the lattice floor
        if ((heightMap[x + z * d_latticeWidth] >= y && heightMap[x + z * d_latticeWidth] > 0.01f) || y == 0) {

#define USE_REGISTER_FRIENDLY_COLLIDERS
#ifdef USE_REGISTER_FRIENDLY_COLLIDERS
            // full bounce back: swap each distribution function with its opposite direction,
            // using a single temporary instead of 18 named locals
            float tmp;
            float *adj = backLattice[idx].adj;
            // left and right
            tmp = adj[DIR_RIGHT_FACE];
            adj[DIR_RIGHT_FACE] = adj[DIR_LEFT_FACE];
            adj[DIR_LEFT_FACE] = tmp;
            // top and bottom
            tmp = adj[DIR_TOP_FACE];
            adj[DIR_TOP_FACE] = adj[DIR_BOTTOM_FACE];
            adj[DIR_BOTTOM_FACE] = tmp;
            // front and back
            tmp = adj[DIR_BACK_FACE];
            adj[DIR_BACK_FACE] = adj[DIR_FRONT_FACE];
            adj[DIR_FRONT_FACE] = tmp;
            // frontLeft and backRight
            tmp = adj[DIR_FRONT_LEFT_EDGE];
            adj[DIR_FRONT_LEFT_EDGE] = adj[DIR_BACK_RIGHT_EDGE];
            adj[DIR_BACK_RIGHT_EDGE] = tmp;
            // frontRight and backLeft
            tmp = adj[DIR_FRONT_RIGHT_EDGE];
            adj[DIR_FRONT_RIGHT_EDGE] = adj[DIR_BACK_LEFT_EDGE];
            // BUGFIX: was `adj[DIR_BACK_LEFT_EDGE] = adj[DIR_FRONT_RIGHT_EDGE];`, which reads the
            // slot overwritten one line above — the saved frontRight value (tmp) was lost and both
            // directions ended up equal to the old backLeft value. The #else branch shows the
            // intended symmetric swap.
            adj[DIR_BACK_LEFT_EDGE] = tmp;
            // bottomFront and topBack
            tmp = adj[DIR_BOTTOM_FRONT_EDGE];
            adj[DIR_BOTTOM_FRONT_EDGE] = adj[DIR_TOP_BACK_EDGE];
            adj[DIR_TOP_BACK_EDGE] = tmp;
            // bottomBack and topFront
            tmp = adj[DIR_BOTTOM_BACK_EDGE];
            adj[DIR_BOTTOM_BACK_EDGE] = adj[DIR_TOP_FRONT_EDGE];
            adj[DIR_TOP_FRONT_EDGE] = tmp;
            // topRight and bottomLeft
            tmp = adj[DIR_TOP_RIGHT_EDGE];
            adj[DIR_TOP_RIGHT_EDGE] = adj[DIR_BOTTOM_LEFT_EDGE];
            adj[DIR_BOTTOM_LEFT_EDGE] = tmp;
            // topLeft and bottomRight
            tmp = adj[DIR_TOP_LEFT_EDGE];
            adj[DIR_TOP_LEFT_EDGE] = adj[DIR_BOTTOM_RIGHT_EDGE];
            adj[DIR_BOTTOM_RIGHT_EDGE] = tmp;
#else // USE_REGISTER_FRIENDLY_COLLIDERS
            // reference implementation: read all 18 directions, then write the reversed assignment
            float right = backLattice[idx].adj[DIR_RIGHT_FACE];
            float left = backLattice[idx].adj[DIR_LEFT_FACE];
            float back = backLattice[idx].adj[DIR_BACK_FACE];
            float front = backLattice[idx].adj[DIR_FRONT_FACE];
            float top = backLattice[idx].adj[DIR_TOP_FACE];
            float bottom = backLattice[idx].adj[DIR_BOTTOM_FACE];
            float backRight = backLattice[idx].adj[DIR_BACK_RIGHT_EDGE];
            float backLeft = backLattice[idx].adj[DIR_BACK_LEFT_EDGE];
            float frontRight = backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE];
            float frontLeft = backLattice[idx].adj[DIR_FRONT_LEFT_EDGE];
            float topBack = backLattice[idx].adj[DIR_TOP_BACK_EDGE];
            float topFront = backLattice[idx].adj[DIR_TOP_FRONT_EDGE];
            float bottomBack = backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE];
            float bottomFront = backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE];
            float topRight = backLattice[idx].adj[DIR_TOP_RIGHT_EDGE];
            float topLeft = backLattice[idx].adj[DIR_TOP_LEFT_EDGE];
            float bottomRight = backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE];
            float bottomLeft = backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE];
            backLattice[idx].adj[DIR_RIGHT_FACE] = left;
            backLattice[idx].adj[DIR_LEFT_FACE] = right;
            backLattice[idx].adj[DIR_BACK_FACE] = front;
            backLattice[idx].adj[DIR_FRONT_FACE] = back;
            backLattice[idx].adj[DIR_TOP_FACE] = bottom;
            backLattice[idx].adj[DIR_BOTTOM_FACE] = top;
            backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] = frontLeft;
            backLattice[idx].adj[DIR_BACK_LEFT_EDGE] = frontRight;
            backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] = backLeft;
            backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] = backRight;
            backLattice[idx].adj[DIR_TOP_BACK_EDGE] = bottomFront;
            backLattice[idx].adj[DIR_TOP_FRONT_EDGE] = bottomBack;
            backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] = topFront;
            backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] = topBack;
            backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] = bottomLeft;
            backLattice[idx].adj[DIR_TOP_LEFT_EDGE] = bottomRight;
            backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] = topLeft;
            backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] = topRight;
#endif // USE_REGISTER_FRIENDLY_COLLIDERS
        }
    }
}

//! Kernel that streams the microscopic particles from the previous frame.
/*!
Kernel that streams the microscopic particles from the previous frame. \param[in] backLatice Lattice that will be used in the current frame (the one we are currently updating). \param[in] frontLattice Lattice from the previous frame from which we stream the particles. */ __global__ void streamingStepKernel(Node3D *backLattice, Node3D *frontLattice) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; if (idx < d_latticeSize) { int x = idx % d_latticeWidth; int y = (idx / d_latticeWidth) % d_latticeHeight; int z = idx / (d_latticeHeight * d_latticeWidth); backLattice[idx].adj[DIR_MIDDLE_VERTEX] += frontLattice[idx].adj[DIR_MIDDLE_VERTEX]; int right; int left; int top; int bottom; int front; int back; right = x + 1; left = x - 1; top = y + 1; bottom = y - 1; front = z + 1; back = z - 1; if (right > d_latticeWidth - 1) { //right = d_latticeWidth - 1; right = 0; } if (left < 0) { //left = 0; left = d_latticeWidth - 1; } if (top > d_latticeHeight - 1) { //top = d_latticeHeight - 1; top = 0; } if (bottom < 0) { //bottom = 0; bottom = d_latticeHeight - 1; } if (front > d_latticeDepth - 1) { //front = d_latticeDepth - 1; front = 0; } if (back < 0) { //back = 0; back = d_latticeDepth - 1; } /* backLattice[idx].adj[DIR_LEFT_FACE] = frontLattice[getIdxKer(right, y, z)].adj[DIR_LEFT_FACE]; backLattice[idx].adj[DIR_FRONT_FACE] = frontLattice[getIdxKer(x, y, back)].adj[DIR_FRONT_FACE]; backLattice[idx].adj[DIR_BOTTOM_FACE] = frontLattice[getIdxKer(x, top, z)].adj[DIR_BOTTOM_FACE]; backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] = frontLattice[getIdxKer(right, y, back)].adj[DIR_FRONT_LEFT_EDGE]; backLattice[idx].adj[DIR_BACK_LEFT_EDGE] = frontLattice[getIdxKer(right, y, front)].adj[DIR_BACK_LEFT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] = frontLattice[getIdxKer(right, top, z)].adj[DIR_BOTTOM_LEFT_EDGE]; backLattice[idx].adj[DIR_TOP_LEFT_EDGE] = frontLattice[getIdxKer(right, bottom, z)].adj[DIR_TOP_LEFT_EDGE]; 
backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] = frontLattice[getIdxKer(x, top, back)].adj[DIR_BOTTOM_FRONT_EDGE]; backLattice[idx].adj[DIR_TOP_FRONT_EDGE] = frontLattice[getIdxKer(x, bottom, back)].adj[DIR_TOP_FRONT_EDGE]; backLattice[idx].adj[DIR_RIGHT_FACE] = frontLattice[getIdxKer(left, y, z)].adj[DIR_RIGHT_FACE]; backLattice[idx].adj[DIR_BACK_FACE] = frontLattice[getIdxKer(x, y, front)].adj[DIR_BACK_FACE]; backLattice[idx].adj[DIR_TOP_FACE] = frontLattice[getIdxKer(x, bottom, z)].adj[DIR_TOP_FACE]; backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] = frontLattice[getIdxKer(left, y, front)].adj[DIR_BACK_RIGHT_EDGE]; backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] = frontLattice[getIdxKer(left, y, back)].adj[DIR_FRONT_RIGHT_EDGE]; backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] = frontLattice[getIdxKer(left, bottom, z)].adj[DIR_TOP_RIGHT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] = frontLattice[getIdxKer(left, top, z)].adj[DIR_BOTTOM_RIGHT_EDGE]; backLattice[idx].adj[DIR_TOP_BACK_EDGE] = frontLattice[getIdxKer(x, bottom, front)].adj[DIR_TOP_BACK_EDGE]; backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] = frontLattice[getIdxKer(x, top, front)].adj[DIR_BOTTOM_BACK_EDGE]; */ backLattice[idx].adj[DIR_LEFT_FACE] += frontLattice[getIdxKer(right, y, z)].adj[DIR_LEFT_FACE]; backLattice[idx].adj[DIR_FRONT_FACE] += frontLattice[getIdxKer(x, y, back)].adj[DIR_FRONT_FACE]; backLattice[idx].adj[DIR_BOTTOM_FACE] += frontLattice[getIdxKer(x, top, z)].adj[DIR_BOTTOM_FACE]; backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] += frontLattice[getIdxKer(right, y, back)].adj[DIR_FRONT_LEFT_EDGE]; backLattice[idx].adj[DIR_BACK_LEFT_EDGE] += frontLattice[getIdxKer(right, y, front)].adj[DIR_BACK_LEFT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] += frontLattice[getIdxKer(right, top, z)].adj[DIR_BOTTOM_LEFT_EDGE]; backLattice[idx].adj[DIR_TOP_LEFT_EDGE] += frontLattice[getIdxKer(right, bottom, z)].adj[DIR_TOP_LEFT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] += frontLattice[getIdxKer(x, top, 
back)].adj[DIR_BOTTOM_FRONT_EDGE]; backLattice[idx].adj[DIR_TOP_FRONT_EDGE] += frontLattice[getIdxKer(x, bottom, back)].adj[DIR_TOP_FRONT_EDGE]; backLattice[idx].adj[DIR_RIGHT_FACE] += frontLattice[getIdxKer(left, y, z)].adj[DIR_RIGHT_FACE]; backLattice[idx].adj[DIR_BACK_FACE] += frontLattice[getIdxKer(x, y, front)].adj[DIR_BACK_FACE]; backLattice[idx].adj[DIR_TOP_FACE] += frontLattice[getIdxKer(x, bottom, z)].adj[DIR_TOP_FACE]; backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] += frontLattice[getIdxKer(left, y, front)].adj[DIR_BACK_RIGHT_EDGE]; backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] += frontLattice[getIdxKer(left, y, back)].adj[DIR_FRONT_RIGHT_EDGE]; backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] += frontLattice[getIdxKer(left, bottom, z)].adj[DIR_TOP_RIGHT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] += frontLattice[getIdxKer(left, top, z)].adj[DIR_BOTTOM_RIGHT_EDGE]; backLattice[idx].adj[DIR_TOP_BACK_EDGE] += frontLattice[getIdxKer(x, bottom, front)].adj[DIR_TOP_BACK_EDGE]; backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] += frontLattice[getIdxKer(x, top, front)].adj[DIR_BOTTOM_BACK_EDGE]; for (int i = 0; i < 19; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } //! Initializes the front lattice with default distribution function values. 
__global__ void initLatticeKernel(Node3D *frontLattice) { int idx = threadIdx.x + blockDim.x * threadIdx.y; // idx in block idx += blockDim.x * blockDim.y * blockIdx.x; //if (idx == 0) { // printf("d_latticeSize = %d\n", d_latticeSize); //} if (idx < d_latticeSize) { frontLattice[idx].adj[DIR_MIDDLE_VERTEX] = WEIGHT_MIDDLE; for (int i = 1; i <= 6; i++) { frontLattice[idx].adj[i] = WEIGHT_AXIS; } for (int i = 7; i <= 18; i++) { frontLattice[idx].adj[i] = WEIGHT_NON_AXIAL; } } } LBM3D_1D_indices::LBM3D_1D_indices() { } LBM3D_1D_indices::LBM3D_1D_indices(VariableManager *vars, ParticleSystem *particleSystem, STLPDiagram *stlpDiagram) : vars(vars), particleSystem(particleSystem), blockDim(blockDim), stlpDiagram(stlpDiagram) { cout << "Creating LBM 3D..." << endl; position = vars->latticePosition; tau = vars->tau; sceneFilename = vars->sceneFilename; blockDim = dim3(vars->blockDim_3D_x, vars->blockDim_3D_y, 1); itau = 1.0f / tau; nu = (2.0f * tau - 1.0f) / 6.0f; heightMap = vars->heightMap; latticeWidth = vars->latticeWidth; latticeHeight = vars->latticeHeight; latticeDepth = vars->latticeDepth; scale = vars->latticeScale; //latticeWidth = heightMap->width; //latticeDepth = heightMap->height; latticeSize = latticeWidth * latticeHeight * latticeDepth; CHECK_ERROR(cudaMalloc((void**)&d_heightMap, sizeof(float) * latticeWidth * latticeDepth)); refreshHeightMap(); //frontLattice = new Node3D[latticeSize](); //backLattice = new Node3D[latticeSize](); //velocities = new glm::vec3[latticeSize](); CHECK_ERROR(cudaMalloc((void**)&d_frontLattice, sizeof(Node3D) * latticeSize)); CHECK_ERROR(cudaMalloc((void**)&d_backLattice, sizeof(Node3D) * latticeSize)); CHECK_ERROR(cudaMalloc((void**)&d_velocities, sizeof(glm::vec3) * latticeSize)); //cudaGraphicsGLRegisterBuffer(&cudaParticleVerticesVBO, particleSystem->vbo, cudaGraphicsMapFlagsWriteDiscard); //CHECK_ERROR(cudaGraphicsGLRegisterBuffer(&cudaParticleColorsVBO, particleSystem->colorsVBO, cudaGraphicsMapFlagsWriteDiscard)); 
CHECK_ERROR(cudaMemcpyToSymbol(dirVectorsConst, &directionVectors3D[0], 19 * sizeof(glm::vec3))); CHECK_ERROR(cudaMemcpyToSymbol(d_latticeWidth, &latticeWidth, sizeof(int))); CHECK_ERROR(cudaMemcpyToSymbol(d_latticeHeight, &latticeHeight, sizeof(int))); CHECK_ERROR(cudaMemcpyToSymbol(d_latticeDepth, &latticeDepth, sizeof(int))); CHECK_ERROR(cudaMemcpyToSymbol(d_latticeSize, &latticeSize, sizeof(int))); CHECK_ERROR(cudaMemcpyToSymbol(d_tau, &tau, sizeof(float))); CHECK_ERROR(cudaMemcpyToSymbol(d_itau, &itau, sizeof(float))); CHECK_ERROR(cudaMemcpyToSymbol(d_worldSizeRatio, &scale, sizeof(float))); CHECK_ERROR(cudaMemcpyToSymbol(d_position, glm::value_ptr(position), sizeof(glm::vec3))); gridDim = dim3((unsigned int)ceil(latticeSize / (blockDim.x * blockDim.y * blockDim.z)) + 1, 1, 1); cacheSize = blockDim.x * blockDim.y * blockDim.z * sizeof(Node3D); initBuffers(); initLattice(); if (vars->useSoundingWindVelocities) { CHECK_ERROR(cudaMalloc((void**)&d_inletVelocities, sizeof(glm::vec3) * latticeHeight)); vector<glm::vec3> windDeltas; stlpDiagram->getWindDeltasForLattice(latticeHeight, windDeltas); CHECK_ERROR(cudaMemcpy(d_inletVelocities, windDeltas.data(), sizeof(glm::vec3) * latticeHeight, cudaMemcpyHostToDevice)); } //CHECK_ERROR(cudaMemcpy(d_backLattice, backLattice, sizeof(Node3D) * latticeSize, cudaMemcpyHostToDevice)); //CHECK_ERROR(cudaMemcpy(d_velocities, velocities, sizeof(glm::vec3) * latticeSize, cudaMemcpyHostToDevice)); //CHECK_ERROR(cudaMemcpy(d_frontLattice, frontLattice, sizeof(Node3D) * latticeSize, cudaMemcpyHostToDevice)); grid = new GridLBM(this); editGrid = new GridLBM(this, glm::vec3(1.0f, 0.2f, 0.2f)); CHECK_ERROR(cudaPeekAtLastError()); } LBM3D_1D_indices::~LBM3D_1D_indices() { //delete[] frontLattice; //delete[] backLattice; //delete[] velocities; //delete heightMap; CHECK_ERROR(cudaFree(d_frontLattice)); CHECK_ERROR(cudaFree(d_backLattice)); CHECK_ERROR(cudaFree(d_velocities)); //cudaGraphicsUnregisterResource(cudaParticleVerticesVBO); 
//cudaGraphicsUnregisterResource(cudaParticleColorsVBO); if (grid) { delete grid; } if (editGrid) { delete editGrid; } } void LBM3D_1D_indices::recalculateVariables() { itau = 1.0f / tau; nu = (2.0f * tau - 1.0f) / 6.0f; CHECK_ERROR(cudaMemcpyToSymbol(d_tau, &tau, sizeof(float))); CHECK_ERROR(cudaMemcpyToSymbol(d_itau, &itau, sizeof(float))); } void LBM3D_1D_indices::refreshHeightMap() { float *tempHM = new float[latticeWidth * latticeDepth](); for (int z = 0; z < latticeDepth; z++) { for (int x = 0; x < latticeWidth; x++) { int xidx = vars->terrainXOffset + x; int zidx = vars->terrainZOffset + z; // TESTING xidx = (int)((xidx * scale) + position.x); zidx = (int)((zidx * scale) + position.z); xidx /= (int)heightMap->texelWorldSize; zidx /= (int)heightMap->texelWorldSize; if (xidx < heightMap->width && xidx >= 0 && zidx < heightMap->height && zidx >= 0) { //tempHM[x + z * latticeWidth] = heightMap->data[xidx][zidx]; tempHM[x + z * latticeWidth] = (heightMap->data[xidx + zidx * heightMap->width] - position.y) / scale; } //tempHM[x + z * latticeWidth] = heightMap->data[x][z]; } } CHECK_ERROR(cudaMemcpy(d_heightMap, tempHM, sizeof(float) * latticeWidth * latticeDepth, cudaMemcpyHostToDevice)); delete[] tempHM; } void LBM3D_1D_indices::startEditing() { editing = true; saveState(); } void LBM3D_1D_indices::stopEditing(bool saveChanges) { editing = false; if (saveChanges) { this->saveChanges(); } else { resetChanges(); } } void LBM3D_1D_indices::saveChanges() { CHECK_ERROR(cudaMemcpyToSymbol(d_worldSizeRatio, &scale, sizeof(float))); CHECK_ERROR(cudaMemcpyToSymbol(d_position, glm::value_ptr(position), sizeof(glm::vec3))); refreshHeightMap(); CHECK_ERROR(cudaGetLastError()); } void LBM3D_1D_indices::resetChanges() { resetToPrevState(); } bool LBM3D_1D_indices::isUnderEdit() { return editing; } void LBM3D_1D_indices::draw() { grid->draw(); if (editing) { editGrid->draw(getPrevStateModelMatrix()); } } void LBM3D_1D_indices::draw(ShaderProgram & shader) { #ifdef 
DRAW_VELOCITY_ARROWS shader.setVec3("u_Color", glm::vec3(0.2f, 0.3f, 1.0f)); glBindVertexArray(velocityVAO); glBindBuffer(GL_ARRAY_BUFFER, velocityVBO); glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * velocityArrows.size(), &velocityArrows[0], GL_STATIC_DRAW); glDrawArrays(GL_LINES, 0, velocityArrows.size()); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS shader.setVec3("u_Color", glm::vec3(0.8f, 1.0f, 0.6f)); glBindVertexArray(particleArrowsVAO); glBindBuffer(GL_ARRAY_BUFFER, particleArrowsVBO); glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * particleArrows.size(), &particleArrows[0], GL_STATIC_DRAW); glDrawArrays(GL_LINES, 0, particleArrows.size()); #endif heightMap->draw(); } void LBM3D_1D_indices::doStep() { clearBackLattice(); updateInlets(); streamingStep(); updateColliders(); collisionStep(); moveParticles(); swapLattices(); } void LBM3D_1D_indices::doStepCUDA() { CHECK_ERROR(cudaPeekAtLastError()); // ============================================= clear back lattice CUDA clearBackLatticeKernel << <gridDim, blockDim >> > (d_backLattice); //cudaDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(cudaPeekAtLastError()); // ============================================= update inlets CUDA //updateInletsKernel << <gridDim, blockDim >> > (d_backLattice, d_velocities, inletVelocity); updateInletsKernel << <gridDim, blockDim >> > (d_backLattice, d_velocities, inletVelocity, d_inletVelocities, xLeftInlet, xRightInlet, yBottomInlet, yTopInlet, zLeftInlet, zRightInlet); //cudaDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(cudaPeekAtLastError()); // ============================================= streaming step CUDA streamingStepKernel << <gridDim, blockDim >> > (d_backLattice, d_frontLattice); //cudaDeviceSynchronize(); // FOR FINDING ERROR - TESTING! 
CHECK_ERROR(cudaPeekAtLastError()); // ============================================= update colliders CUDA updateCollidersKernel << <gridDim, blockDim >> > (d_backLattice, d_velocities, d_heightMap); //cudaDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(cudaPeekAtLastError()); // ============================================= collision step CUDA //collisionStepKernel << <gridDim, blockDim >> > (d_backLattice, d_velocities, vars->useSubgridModel); //collisionStepKernelShared << <gridDim, blockDim, cacheSize >> > (d_backLattice, d_velocities, vars->useSubgridModel); if (vars->lbmUseExtendedCollisionStep) { collisionStepKernelSharedNewReorganizedExtended << <gridDim, blockDim, cacheSize >> > (d_backLattice, d_velocities, vars->useSubgridModel); } else { collisionStepKernelSharedNewReorganized << <gridDim, blockDim, cacheSize >> > (d_backLattice, d_velocities, vars->useSubgridModel); } //collisionStepKernelStreamlinedShared << <gridDim, blockDim, cacheSize >> > (d_backLattice, d_velocities); //cudaDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(cudaPeekAtLastError()); // ============================================= move particles CUDA - different respawn from CPU !!! glm::vec3 *d_particleVerticesVBO; CHECK_ERROR(cudaGraphicsMapResources(1, &particleSystem->cudaParticleVerticesVBO, 0)); size_t num_bytes; CHECK_ERROR(cudaGraphicsResourceGetMappedPointer((void **)&d_particleVerticesVBO, &num_bytes, particleSystem->cudaParticleVerticesVBO)); //printf("CUDA-LBM mapped VBO: May access %ld bytes\n", num_bytes); /* glm::vec3 *d_particleColorsVBO; cudaGraphicsMapResources(1, &cudaParticleColorsVBO, 0); cudaGraphicsResourceGetMappedPointer((void **)&d_particleColorsVBO, &num_bytes, cudaParticleColorsVBO); */ //cudaDeviceSynchronize(); // FOR FINDING ERROR - TESTING! 
CHECK_ERROR(cudaPeekAtLastError()); //moveParticlesKernelInterop << <gridDim, blockDim >> > (d_particleVerticesVBO, d_velocities, /*d_numParticles*/particleSystem->numActiveParticles, nullptr, respawnMode, outOfBoundsMode); //moveParticlesKernelInteropNew << <gridDim, blockDim >> > (d_particleVerticesVBO, d_velocities, /*d_numParticles*/particleSystem->numActiveParticles, nullptr, respawnMode, outOfBoundsMode, vars->lbmVelocityMultiplier, (bool)vars->lbmUseCorrectInterpolation); moveParticlesKernelInteropNew2 << <gridDim, blockDim >> > (d_particleVerticesVBO, d_velocities, /*d_numParticles*/particleSystem->numActiveParticles, nullptr, respawnMode, outOfBoundsMode, vars->lbmVelocityMultiplier, vars->lbmUseCorrectInterpolation != 0); //cudaDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(cudaPeekAtLastError()); CHECK_ERROR(cudaGraphicsUnmapResources(1, &particleSystem->cudaParticleVerticesVBO, 0)); if (streamlineParticleSystem->active) { streamlineParticleSystem->frameCounter++; glm::vec3 *d_streamlinesVBO; CHECK_ERROR(cudaGraphicsMapResources(1, &streamlineParticleSystem->cudaStreamlinesVBO, 0)); size_t num_bytes; CHECK_ERROR(cudaGraphicsResourceGetMappedPointer((void **)&d_streamlinesVBO, &num_bytes, streamlineParticleSystem->cudaStreamlinesVBO)); CHECK_ERROR(cudaPeekAtLastError()); moveStreamlineParticlesKernel << <gridDim, blockDim >> > (d_streamlinesVBO, d_velocities, streamlineParticleSystem->d_currActiveVertices, streamlineParticleSystem->maxStreamlineLength, streamlineParticleSystem->maxNumStreamlines, respawnMode, outOfBoundsMode, vars->lbmVelocityMultiplier, vars->lbmUseCorrectInterpolation != 0); CHECK_ERROR(cudaPeekAtLastError()); cudaGraphicsUnmapResources(1, &streamlineParticleSystem->cudaStreamlinesVBO, 0); CHECK_ERROR(cudaPeekAtLastError()); streamlineParticleSystem->update(); if (streamlineParticleSystem->frameCounter >= streamlineParticleSystem->maxStreamlineLength) { // we should be finished with creating the streamlines 
streamlineParticleSystem->deactivate(); } } /* cudaGraphicsUnmapResources(1, &cudaParticleColorsVBO, 0); */ //CHECK_ERROR(cudaPeekAtLastError()); swapLattices(); //CHECK_ERROR(cudaPeekAtLastError()); frameId++; //cudaDeviceSynchronize(); // FOR FINDING ERROR - TESTING! CHECK_ERROR(cudaPeekAtLastError()); } void LBM3D_1D_indices::clearBackLattice() { for (int i = 0; i < latticeSize; i++) { for (int j = 0; j < 19; j++) { backLattice[i].adj[j] = 0.0f; } } #ifdef DRAW_VELOCITY_ARROWS velocityArrows.clear(); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS particleArrows.clear(); #endif } void LBM3D_1D_indices::streamingStep() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { for (int z = 0; z < latticeDepth; z++) { int idx = getIdx(x, y, z); backLattice[idx].adj[DIR_MIDDLE_VERTEX] += frontLattice[idx].adj[DIR_MIDDLE_VERTEX]; int right; int left; int top; int bottom; int front; int back; right = x + 1; left = x - 1; top = y + 1; bottom = y - 1; front = z + 1; back = z - 1; if (right > latticeWidth - 1) { right = latticeWidth - 1; } if (left < 0) { left = 0; } if (top > latticeHeight - 1) { top = latticeHeight - 1; } if (bottom < 0) { bottom = 0; } if (front > latticeDepth - 1) { front = latticeDepth - 1; } if (back < 0) { back = 0; } backLattice[idx].adj[DIR_LEFT_FACE] += frontLattice[getIdx(right, y, z)].adj[DIR_LEFT_FACE]; backLattice[idx].adj[DIR_FRONT_FACE] += frontLattice[getIdx(x, y, back)].adj[DIR_FRONT_FACE]; backLattice[idx].adj[DIR_BOTTOM_FACE] += frontLattice[getIdx(x, top, z)].adj[DIR_BOTTOM_FACE]; backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] += frontLattice[getIdx(right, y, back)].adj[DIR_FRONT_LEFT_EDGE]; backLattice[idx].adj[DIR_BACK_LEFT_EDGE] += frontLattice[getIdx(right, y, front)].adj[DIR_BACK_LEFT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] += frontLattice[getIdx(right, top, z)].adj[DIR_BOTTOM_LEFT_EDGE]; backLattice[idx].adj[DIR_TOP_LEFT_EDGE] += frontLattice[getIdx(right, bottom, z)].adj[DIR_TOP_LEFT_EDGE]; 
backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] += frontLattice[getIdx(x, top, back)].adj[DIR_BOTTOM_FRONT_EDGE]; backLattice[idx].adj[DIR_TOP_FRONT_EDGE] += frontLattice[getIdx(x, bottom, back)].adj[DIR_TOP_FRONT_EDGE]; backLattice[idx].adj[DIR_RIGHT_FACE] += frontLattice[getIdx(left, y, z)].adj[DIR_RIGHT_FACE]; backLattice[idx].adj[DIR_BACK_FACE] += frontLattice[getIdx(x, y, front)].adj[DIR_BACK_FACE]; backLattice[idx].adj[DIR_TOP_FACE] += frontLattice[getIdx(x, bottom, z)].adj[DIR_TOP_FACE]; backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] += frontLattice[getIdx(left, y, front)].adj[DIR_BACK_RIGHT_EDGE]; backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] += frontLattice[getIdx(left, y, back)].adj[DIR_FRONT_RIGHT_EDGE]; backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] += frontLattice[getIdx(left, bottom, z)].adj[DIR_TOP_RIGHT_EDGE]; backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] += frontLattice[getIdx(left, top, z)].adj[DIR_BOTTOM_RIGHT_EDGE]; backLattice[idx].adj[DIR_TOP_BACK_EDGE] += frontLattice[getIdx(x, bottom, front)].adj[DIR_TOP_BACK_EDGE]; backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] += frontLattice[getIdx(x, top, front)].adj[DIR_BOTTOM_BACK_EDGE]; for (int i = 0; i < 19; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } } void LBM3D_1D_indices::collisionStep() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { for (int z = 0; z < latticeDepth; z++) { int idx = getIdx(x, y, z); float macroDensity = calculateMacroscopicDensity(x, y, z); glm::vec3 macroVelocity = calculateMacroscopicVelocity(x, y, z, macroDensity); velocities[idx] = macroVelocity; #ifdef DRAW_VELOCITY_ARROWS velocityArrows.push_back(glm::vec3(x, y, z)); velocityArrows.push_back(glm::vec3(x, y, z) + velocities[idx] * 2.0f); #endif float leftTermMiddle = WEIGHT_MIDDLE * macroDensity; float leftTermAxis = WEIGHT_AXIS * macroDensity; float leftTermNonaxial = WEIGHT_NON_AXIAL * 
macroDensity; float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); float dotProd = glm::dot(vRight, macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBackRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBackLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFrontRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); 
dotProd = glm::dot(vFrontLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); if (useSubgridModel) { // SUBGRID MODEL - EXPERIMENTAL - GIVES INCORRECT VALUES float tensor[3][3]; float 
diffs[19]; diffs[0] = (backLattice[idx].adj[DIR_MIDDLE_VERTEX] - middleEq); diffs[1] = (backLattice[idx].adj[DIR_RIGHT_FACE] - rightEq); diffs[2] = (backLattice[idx].adj[DIR_LEFT_FACE] - leftEq); diffs[3] = (backLattice[idx].adj[DIR_BACK_FACE] - backEq); diffs[4] = (backLattice[idx].adj[DIR_FRONT_FACE] - frontEq); diffs[5] = (backLattice[idx].adj[DIR_TOP_FACE] - topEq); diffs[6] = (backLattice[idx].adj[DIR_BOTTOM_FACE] - bottomEq); diffs[7] = (backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq); diffs[8] = (backLattice[idx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq); diffs[9] = (backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq); diffs[10] = (backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq); diffs[11] = (backLattice[idx].adj[DIR_TOP_BACK_EDGE] - topBackEq); diffs[12] = (backLattice[idx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq); diffs[13] = (backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq); diffs[14] = (backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq); diffs[15] = (backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq); diffs[16] = (backLattice[idx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq); diffs[17] = (backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq); diffs[18] = (backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq); float sum = 0.0f; for (int i = 0; i < 19; i++) { sum += diffs[i]; } for (int i = 0; i < 9; i++) { tensor[0][0] = 0.0f; } for (int i = 0; i < 19; i++) { tensor[0][0] += directionVectors3D[i].x * directionVectors3D[i].x * diffs[i]; tensor[0][1] += directionVectors3D[i].x * directionVectors3D[i].y * diffs[i]; tensor[0][2] += directionVectors3D[i].x * directionVectors3D[i].z * diffs[i]; tensor[1][0] += directionVectors3D[i].y * directionVectors3D[i].x * diffs[i]; tensor[1][1] += directionVectors3D[i].y * directionVectors3D[i].y * diffs[i]; tensor[1][2] += directionVectors3D[i].y * directionVectors3D[i].z * diffs[i]; tensor[2][0] += directionVectors3D[i].z * directionVectors3D[i].x * diffs[i]; tensor[2][1] += 
directionVectors3D[i].z * directionVectors3D[i].y * diffs[i];
					tensor[2][2] += directionVectors3D[i].z * directionVectors3D[i].z * diffs[i];
				}
				// Squared Frobenius norm of the momentum-flux tensor.
				sum = 0.0f;
				for (int i = 0; i < 3; i++) {
					for (int j = 0; j < 3; j++) {
						sum += tensor[i][j] * tensor[i][j];
					}
				}
				// Smagorinsky subgrid model: local strain-rate magnitude S, then an
				// effective relaxation time tau from the eddy viscosity term.
				float S = (-nu + sqrtf(nu * nu + 18.0f * SMAG_C * sqrtf(sum))) / (6.0f * SMAG_C * SMAG_C);
				// NOTE(review): this overwrites the *member* tau/itau for every cell and
				// never restores the base value, so each cell's tau leaks into the next
				// cell and into subsequent frames — presumably a local effective tau was
				// intended; TODO confirm against the CUDA collision kernels.
				tau = 3.0f * (nu + SMAG_C * SMAG_C * S) + 0.5f;
				itau = 1.0f / tau;
			}

			// BGK relaxation: move each distribution toward its equilibrium value
			// with rate 1/tau.
			backLattice[idx].adj[DIR_MIDDLE_VERTEX] -= itau * (backLattice[idx].adj[DIR_MIDDLE_VERTEX] - middleEq);
			backLattice[idx].adj[DIR_RIGHT_FACE] -= itau * (backLattice[idx].adj[DIR_RIGHT_FACE] - rightEq);
			backLattice[idx].adj[DIR_LEFT_FACE] -= itau * (backLattice[idx].adj[DIR_LEFT_FACE] - leftEq);
			backLattice[idx].adj[DIR_BACK_FACE] -= itau * (backLattice[idx].adj[DIR_BACK_FACE] - backEq);
			backLattice[idx].adj[DIR_FRONT_FACE] -= itau * (backLattice[idx].adj[DIR_FRONT_FACE] - frontEq);
			backLattice[idx].adj[DIR_TOP_FACE] -= itau * (backLattice[idx].adj[DIR_TOP_FACE] - topEq);
			backLattice[idx].adj[DIR_BOTTOM_FACE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_FACE] - bottomEq);
			backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] - backRightEq);
			backLattice[idx].adj[DIR_BACK_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_BACK_LEFT_EDGE] - backLeftEq);
			backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] - frontRightEq);
			backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] - frontLeftEq);
			backLattice[idx].adj[DIR_TOP_BACK_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_BACK_EDGE] - topBackEq);
			backLattice[idx].adj[DIR_TOP_FRONT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_FRONT_EDGE] - topFrontEq);
			backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] - bottomBackEq);
			backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] - bottomFrontEq);
backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] - topRightEq); backLattice[idx].adj[DIR_TOP_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_TOP_LEFT_EDGE] - topLeftEq); backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] - bottomRightEq); backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] -= itau * (backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] - bottomLeftEq); for (int i = 0; i < 19; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } } void LBM3D_1D_indices::moveParticles() { glm::vec3 adjVelocities[8]; for (int i = 0; i < particleSystemLBM->numParticles; i++) { float x = particleVertices[i].x; float y = particleVertices[i].y; float z = particleVertices[i].z; int leftX = (int)x; int rightX = leftX + 1; int bottomY = (int)y; int topY = bottomY + 1; int backZ = (int)z; int frontZ = backZ + 1; adjVelocities[0] = velocities[getIdx(leftX, topY, backZ)]; adjVelocities[1] = velocities[getIdx(rightX, topY, backZ)]; adjVelocities[2] = velocities[getIdx(leftX, bottomY, backZ)]; adjVelocities[3] = velocities[getIdx(rightX, bottomY, backZ)]; adjVelocities[4] = velocities[getIdx(leftX, topY, frontZ)]; adjVelocities[5] = velocities[getIdx(rightX, topY, frontZ)]; adjVelocities[6] = velocities[getIdx(leftX, bottomY, frontZ)]; adjVelocities[7] = velocities[getIdx(rightX, bottomY, frontZ)]; float horizontalRatio = x - leftX; float verticalRatio = y - bottomY; float depthRatio = z - backZ; glm::vec3 topBackVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec3 bottomBackVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec3 backVelocity = bottomBackVelocity * verticalRatio + topBackVelocity * (1.0f - verticalRatio); glm::vec3 topFrontVelocity = adjVelocities[4] * horizontalRatio + adjVelocities[5] * 
(1.0f - horizontalRatio); glm::vec3 bottomFrontVelocity = adjVelocities[6] * horizontalRatio + adjVelocities[7] * (1.0f - horizontalRatio); glm::vec3 frontVelocity = bottomFrontVelocity * verticalRatio + topFrontVelocity * (1.0f - verticalRatio); glm::vec3 finalVelocity = backVelocity * depthRatio + frontVelocity * (1.0f - depthRatio); #ifdef DRAW_PARTICLE_VELOCITY_ARROWS particleArrows.push_back(particleVertices[i]); #endif particleVertices[i] += finalVelocity; #ifdef DRAW_PARTICLE_VELOCITY_ARROWS glm::vec3 tmp = particleVertices[i] + 10.0f * finalVelocity; particleArrows.push_back(tmp); #endif if (!respawnLinearly) { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1 || particleVertices[i].y <= 0.0f || particleVertices[i].y >= latticeHeight - 1 || particleVertices[i].z <= 0.0f || particleVertices[i].z >= latticeDepth - 1) { particleVertices[i].x = 0.0f; particleVertices[i].y = rand(i, (int)y) * (latticeHeight - 1); particleVertices[i].z = rand(i, (int)z) * (latticeDepth - 1); //particleVertices[i].y = std::rand() % (latticeHeight - 1); //particleVertices[i].z = std::rand() % (latticeDepth - 1); } } else { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1 || particleVertices[i].y <= 0.0f || particleVertices[i].y >= latticeHeight - 1 || particleVertices[i].z <= 0.0f || particleVertices[i].z >= latticeDepth - 1) { particleVertices[i] = glm::vec3(0.0f, respawnY, respawnZ++); if (respawnZ >= latticeDepth - 1) { respawnZ = 0; respawnY++; } if (respawnY >= latticeHeight - 1) { respawnY = 0; } } } } } void LBM3D_1D_indices::updateInlets() { float macroDensity = 1.0f; glm::vec3 macroVelocity = inletVelocity; float leftTermMiddle = WEIGHT_MIDDLE * macroDensity; float leftTermAxis = WEIGHT_AXIS * macroDensity; float leftTermNonaxial = WEIGHT_NON_AXIAL * macroDensity; float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + 
leftTermMiddle * (-thirdTerm); float dotProd = glm::dot(vRight, macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBackRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBackLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float backLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFrontRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vFrontLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float frontLeftEq = leftTermNonaxial + 
leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomBack, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomBackEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomFront, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomFrontEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermNonaxial + leftTermNonaxial * (firstTerm + secondTerm - thirdTerm); for (int z = 0; z < latticeDepth; z++) { for (int y = 0; y < latticeHeight; y++) { int idx = getIdx(0, y, z); backLattice[idx].adj[DIR_MIDDLE_VERTEX] = middleEq; backLattice[idx].adj[DIR_RIGHT_FACE] = rightEq; backLattice[idx].adj[DIR_LEFT_FACE] = leftEq; 
backLattice[idx].adj[DIR_BACK_FACE] = backEq; backLattice[idx].adj[DIR_FRONT_FACE] = frontEq; backLattice[idx].adj[DIR_TOP_FACE] = topEq; backLattice[idx].adj[DIR_BOTTOM_FACE] = bottomEq; backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] = backRightEq; backLattice[idx].adj[DIR_BACK_LEFT_EDGE] = backLeftEq; backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] = frontRightEq; backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] = frontLeftEq; backLattice[idx].adj[DIR_TOP_BACK_EDGE] = topBackEq; backLattice[idx].adj[DIR_TOP_FRONT_EDGE] = topFrontEq; backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] = bottomBackEq; backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] = bottomFrontEq; backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] = topRightEq; backLattice[idx].adj[DIR_TOP_LEFT_EDGE] = topLeftEq; backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] = bottomRightEq; backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] = bottomLeftEq; for (int i = 0; i < 19; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } void LBM3D_1D_indices::updateColliders() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { for (int z = 0; z < latticeDepth; z++) { int idx = getIdx(x, y, z); if ((heightMap->data[x + z * heightMap->width] >= y && heightMap->data[x + z * heightMap->width] > 0.01f) || y == 0) { float right = backLattice[idx].adj[DIR_RIGHT_FACE]; float left = backLattice[idx].adj[DIR_LEFT_FACE]; float back = backLattice[idx].adj[DIR_BACK_FACE]; float front = backLattice[idx].adj[DIR_FRONT_FACE]; float top = backLattice[idx].adj[DIR_TOP_FACE]; float bottom = backLattice[idx].adj[DIR_BOTTOM_FACE]; float backRight = backLattice[idx].adj[DIR_BACK_RIGHT_EDGE]; float backLeft = backLattice[idx].adj[DIR_BACK_LEFT_EDGE]; float frontRight = backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE]; float frontLeft = backLattice[idx].adj[DIR_FRONT_LEFT_EDGE]; float topBack = backLattice[idx].adj[DIR_TOP_BACK_EDGE]; float topFront = 
backLattice[idx].adj[DIR_TOP_FRONT_EDGE];
				float bottomBack = backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE];
				float bottomFront = backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE];
				float topRight = backLattice[idx].adj[DIR_TOP_RIGHT_EDGE];
				float topLeft = backLattice[idx].adj[DIR_TOP_LEFT_EDGE];
				float bottomRight = backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE];
				float bottomLeft = backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE];

				// Full bounce-back boundary: every distribution is reflected into
				// its exactly opposite lattice direction (no-slip wall).
				backLattice[idx].adj[DIR_RIGHT_FACE] = left;
				backLattice[idx].adj[DIR_LEFT_FACE] = right;
				backLattice[idx].adj[DIR_BACK_FACE] = front;
				backLattice[idx].adj[DIR_FRONT_FACE] = back;
				backLattice[idx].adj[DIR_TOP_FACE] = bottom;
				backLattice[idx].adj[DIR_BOTTOM_FACE] = top;
				backLattice[idx].adj[DIR_BACK_RIGHT_EDGE] = frontLeft;
				backLattice[idx].adj[DIR_BACK_LEFT_EDGE] = frontRight;
				backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE] = backLeft;
				backLattice[idx].adj[DIR_FRONT_LEFT_EDGE] = backRight;
				backLattice[idx].adj[DIR_TOP_BACK_EDGE] = bottomFront;
				backLattice[idx].adj[DIR_TOP_FRONT_EDGE] = bottomBack;
				backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE] = topFront;
				backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE] = topBack;
				backLattice[idx].adj[DIR_TOP_RIGHT_EDGE] = bottomLeft;
				backLattice[idx].adj[DIR_TOP_LEFT_EDGE] = bottomRight;
				backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE] = topLeft;
				backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE] = topRight;
			}
		}
	}
}

// Resets the simulation by re-initializing the device-side lattice
// (initLattice zeroes the back lattice/velocities and re-launches
// initLatticeKernel); particle positions are left to the particle system.
void LBM3D_1D_indices::resetSimulation() {
	cout << "Resetting simulation..."
<< endl; //particleSystemLBM->initParticlePositions(latticeWidth, latticeHeight, latticeDepth, heightMap); /* for (int i = 0; i < latticeWidth * latticeHeight; i++) { for (int j = 0; j < 19; j++) { backLattice[i].adj[j] = 0.0f; } velocities[i] = glm::vec3(0.0f); } initLattice(); */ /* cudaMemcpy(d_frontLattice, frontLattice, sizeof(Node3D) * latticeSize, cudaMemcpyHostToDevice); cudaMemcpy(d_backLattice, backLattice, sizeof(Node3D) * latticeSize, cudaMemcpyHostToDevice); cudaMemcpy(d_velocities, velocities, sizeof(glm::vec3) * latticeSize, cudaMemcpyHostToDevice); */ initLattice(); } void LBM3D_1D_indices::synchronize() { cudaDeviceSynchronize(); } float LBM3D_1D_indices::getWorldWidth() { return scale * latticeWidth; } float LBM3D_1D_indices::getWorldHeight() { return scale * latticeHeight; } float LBM3D_1D_indices::getWorldDepth() { return scale * latticeDepth; } void LBM3D_1D_indices::snapToGround() { if (!heightMap) { return; } float ww = getWorldWidth(); float wd = getWorldDepth(); float miny = heightMap->getHeight(position.x, position.z); miny = min(miny, heightMap->getHeight(position.x + ww, position.z)); miny = min(miny, heightMap->getHeight(position.x + ww, position.z + wd)); miny = min(miny, heightMap->getHeight(position.x, position.z + wd)); position.y = miny; } const char * LBM3D_1D_indices::getRespawnModeString(int mode) { switch (mode) { case eRespawnMode::CYCLE_ALL: return "Cycle All (x, y, z)"; case eRespawnMode::CYCLE_XZ: return "Cycle x and z"; case eRespawnMode::RANDOM_UNIFORM: return "Random (Uniform)"; default: return "None"; } } glm::mat4 LBM3D_1D_indices::getModelMatrix() { glm::mat4 model(1.0f); model = glm::translate(model, position); model = glm::scale(model, glm::vec3(scale)); return model; } glm::mat4 LBM3D_1D_indices::getPrevStateModelMatrix() { glm::mat4 model(1.0f); model = glm::translate(model, prevState.position); model = glm::scale(model, glm::vec3(prevState.scale)); return model; } void LBM3D_1D_indices::mapVBO(GLuint VBO) { //res 
= cudaParticleVerticesVBO; //cudaParticleColorsVBO = res; //cout << "CUDA mapping VBO" << endl; CHECK_ERROR(cudaGraphicsGLRegisterBuffer(&cudaParticleVerticesVBO, VBO, cudaGraphicsMapFlagsWriteDiscard)); // returns out of memory error (even though it shouldn't according to documentation } void LBM3D_1D_indices::initBuffers() { #ifdef DRAW_VELOCITY_ARROWS // Velocity arrows glGenVertexArrays(1, &velocityVAO); glBindVertexArray(velocityVAO); glGenBuffers(1, &velocityVBO); glBindBuffer(GL_ARRAY_BUFFER, velocityVBO); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); glBindVertexArray(0); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS // Particle arrows glGenVertexArrays(1, &particleArrowsVAO); glBindVertexArray(particleArrowsVAO); glGenBuffers(1, &particleArrowsVBO); glBindBuffer(GL_ARRAY_BUFFER, particleArrowsVBO); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); #endif glBindVertexArray(0); } void LBM3D_1D_indices::initLattice() { CHECK_ERROR(cudaMemset(d_backLattice, 0, sizeof(Node3D) * latticeSize)); CHECK_ERROR(cudaMemset(d_velocities, 0, sizeof(glm::vec3) * latticeSize)); initLatticeKernel << <gridDim, blockDim >> > (d_frontLattice); /* for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { for (int z = 0; z < latticeDepth; z++) { int idx = getIdx(x, y, z); frontLattice[idx].adj[DIR_MIDDLE_VERTEX] = WEIGHT_MIDDLE; for (int i = 1; i <= 6; i++) { frontLattice[idx].adj[i] = WEIGHT_AXIS; } for (int i = 7; i <= 18; i++) { frontLattice[idx].adj[i] = WEIGHT_NON_AXIAL; } } } } */ } void LBM3D_1D_indices::swapLattices() { // CPU Node3D *tmp = frontLattice; frontLattice = backLattice; backLattice = tmp; // GPU tmp = d_frontLattice; d_frontLattice = d_backLattice; d_backLattice = tmp; } float LBM3D_1D_indices::calculateMacroscopicDensity(int x, int y, int z) { float macroDensity = 0.0f; int idx = getIdx(x, y, z); for (int i = 0; i < 
19; i++) { macroDensity += backLattice[idx].adj[i]; } return macroDensity; } glm::vec3 LBM3D_1D_indices::calculateMacroscopicVelocity(int x, int y, int z, float macroDensity) { glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f); int idx = getIdx(x, y, z); macroVelocity += vLeft * backLattice[idx].adj[DIR_LEFT_FACE]; macroVelocity += vFront * backLattice[idx].adj[DIR_FRONT_FACE]; macroVelocity += vBottom * backLattice[idx].adj[DIR_BOTTOM_FACE]; macroVelocity += vFrontLeft * backLattice[idx].adj[DIR_FRONT_LEFT_EDGE]; macroVelocity += vBackLeft * backLattice[idx].adj[DIR_BACK_LEFT_EDGE]; macroVelocity += vBottomLeft * backLattice[idx].adj[DIR_BOTTOM_LEFT_EDGE]; macroVelocity += vTopLeft * backLattice[idx].adj[DIR_TOP_LEFT_EDGE]; macroVelocity += vBottomFront * backLattice[idx].adj[DIR_BOTTOM_FRONT_EDGE]; macroVelocity += vTopFront * backLattice[idx].adj[DIR_TOP_FRONT_EDGE]; macroVelocity += vRight * backLattice[idx].adj[DIR_RIGHT_FACE]; macroVelocity += vBack * backLattice[idx].adj[DIR_BACK_FACE]; macroVelocity += vTop * backLattice[idx].adj[DIR_TOP_FACE]; macroVelocity += vBackRight * backLattice[idx].adj[DIR_BACK_RIGHT_EDGE]; macroVelocity += vFrontRight * backLattice[idx].adj[DIR_FRONT_RIGHT_EDGE]; macroVelocity += vTopRight * backLattice[idx].adj[DIR_TOP_RIGHT_EDGE]; macroVelocity += vBottomRight * backLattice[idx].adj[DIR_BOTTOM_RIGHT_EDGE]; macroVelocity += vTopBack * backLattice[idx].adj[DIR_TOP_BACK_EDGE]; macroVelocity += vBottomBack * backLattice[idx].adj[DIR_BOTTOM_BACK_EDGE]; macroVelocity /= macroDensity; return macroVelocity; } void LBM3D_1D_indices::saveState() { prevState.position = position; prevState.scale = scale; } void LBM3D_1D_indices::resetToPrevState() { position = prevState.position; scale = prevState.scale; }
e7f1bedece50674ff6c5d6a659dd1502c5f200c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/memory_copier.h" #include "oneflow/core/common/auto_registration_factory.h" #include "oneflow/core/common/nd_index_offset_helper.h" namespace oneflow { namespace { template<int32_t NDIMS, typename I> struct SOA { I val[NDIMS]; }; template<int32_t NDIMS, typename T, typename I> __global__ void CopyNDGpu(const int n, T* dst, const T* src, NdIndexOffsetHelper<I, NDIMS> dst_helper, NdIndexOffsetHelper<I, NDIMS> src_helper, NdIndexOffsetHelper<I, NDIMS> copy_helper, SOA<NDIMS, I> dst_pos, SOA<NDIMS, I> src_pos) { CUDA_1D_KERNEL_LOOP_T(I, i, n) { I copy_idx[NDIMS]; I src_idx[NDIMS]; I dst_idx[NDIMS]; copy_helper.OffsetToNdIndex(i, copy_idx); #pragma unroll for (I j = 0; j < NDIMS; j++) { src_idx[j] = src_pos.val[j] + copy_idx[j]; dst_idx[j] = dst_pos.val[j] + copy_idx[j]; } const I src_offset = src_helper.NdIndexToOffset(src_idx); const I dst_offset = dst_helper.NdIndexToOffset(dst_idx); dst[dst_offset] = src[src_offset]; } } size_t GetPackSize(const MemoryCopyNdDesc& desc, const void* dst, const void* src) { const int64_t mask = desc.src_shape.dim_vec().back() | desc.dst_shape.dim_vec().back() | desc.extent.dim_vec().back() | desc.src_pos.dim_vec().back() | desc.dst_pos.dim_vec().back() | static_cast<int64_t>(reinterpret_cast<uintptr_t>(dst)) | 
static_cast<int64_t>(reinterpret_cast<uintptr_t>(src)); if ((mask & 0xF) == 0) { return 16; } else if ((mask & 0x7) == 0) { return 8; } else if ((mask & 0x3) == 0) { return 4; } else if ((mask & 0x1) == 0) { return 2; } else { return 1; } } } // namespace template<int32_t NDIMS, typename P, typename I> void CopyNDByPackByIndexTypeGpu(DeviceCtx* ctx, void* dst, const void* src, const MemoryCopyNdDesc& desc) { CHECK_EQ(desc.dst_pos.NumAxes(), NDIMS); CHECK_EQ(desc.src_pos.NumAxes(), NDIMS); CHECK_EQ(desc.dst_shape.NumAxes(), NDIMS); CHECK_EQ(desc.src_shape.NumAxes(), NDIMS); CHECK_EQ(desc.extent.NumAxes(), NDIMS); constexpr size_t pack_size = sizeof(P); I dst_shape_dim_arr[NDIMS]; I src_shape_dim_arr[NDIMS]; I extent_dim_arr[NDIMS]; SOA<NDIMS, I> src_pos; SOA<NDIMS, I> dst_pos; FOR_RANGE(int64_t, i, 0, NDIMS) { if (i == NDIMS - 1) { dst_pos.val[i] = desc.dst_pos.dim_vec().at(i) / pack_size; src_pos.val[i] = desc.src_pos.dim_vec().at(i) / pack_size; dst_shape_dim_arr[i] = desc.dst_shape.dim_vec().at(i) / pack_size; src_shape_dim_arr[i] = desc.src_shape.dim_vec().at(i) / pack_size; extent_dim_arr[i] = desc.extent.dim_vec().at(i) / pack_size; } else { dst_pos.val[i] = desc.dst_pos.dim_vec().at(i); src_pos.val[i] = desc.src_pos.dim_vec().at(i); dst_shape_dim_arr[i] = desc.dst_shape.dim_vec().at(i); src_shape_dim_arr[i] = desc.src_shape.dim_vec().at(i); extent_dim_arr[i] = desc.extent.dim_vec().at(i); } } NdIndexOffsetHelper<I, NDIMS> dst_helper(dst_shape_dim_arr); NdIndexOffsetHelper<I, NDIMS> src_helper(src_shape_dim_arr); NdIndexOffsetHelper<I, NDIMS> copy_helper(extent_dim_arr); const int64_t elem_cnt = desc.extent.elem_cnt() / pack_size; hipLaunchKernelGGL(( CopyNDGpu<NDIMS, P, I>) , dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), elem_cnt, reinterpret_cast<P*>(dst), reinterpret_cast<const P*>(src), dst_helper, src_helper, copy_helper, dst_pos, src_pos); } template<int32_t NDIMS, typename P> void CopyNDByPackGpu(DeviceCtx* 
ctx, void* dst, const void* src, const MemoryCopyNdDesc& desc) { if (::max(desc.dst_shape.elem_cnt(), desc.src_shape.elem_cnt()) > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) { CopyNDByPackByIndexTypeGpu<NDIMS, P, int64_t>(ctx, dst, src, desc); } else { CopyNDByPackByIndexTypeGpu<NDIMS, P, int32_t>(ctx, dst, src, desc); } } template<int32_t NDIMS> void CopyNDGpuImpl(DeviceCtx* ctx, void* dst, const void* src, const MemoryCopyNdDesc& desc) { const size_t pack_size = GetPackSize(desc, dst, src); if (pack_size == 1) { CopyNDByPackGpu<NDIMS, uint8_t>(ctx, dst, src, desc); } else if (pack_size == 2) { CopyNDByPackGpu<NDIMS, uint16_t>(ctx, dst, src, desc); } else if (pack_size == 4) { CopyNDByPackGpu<NDIMS, uint32_t>(ctx, dst, src, desc); } else if (pack_size == 8) { CopyNDByPackGpu<NDIMS, uint64_t>(ctx, dst, src, desc); } else if (pack_size == 16) { static_assert(sizeof(uint4) == 16, ""); CopyNDByPackGpu<NDIMS, uint4>(ctx, dst, src, desc); } else { UNIMPLEMENTED(); } } #define SPECIALIZE_COPY_ND_GPU_IMPL(NDIMS) \ template void CopyNDGpuImpl<NDIMS>(DeviceCtx * ctx, void* dst, const void* src, \ const MemoryCopyNdDesc& desc); SPECIALIZE_COPY_ND_GPU_IMPL(2) SPECIALIZE_COPY_ND_GPU_IMPL(3) SPECIALIZE_COPY_ND_GPU_IMPL(4) SPECIALIZE_COPY_ND_GPU_IMPL(5) SPECIALIZE_COPY_ND_GPU_IMPL(6) } // namespace oneflow
e7f1bedece50674ff6c5d6a659dd1502c5f200c9.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/memory_copier.h" #include "oneflow/core/common/auto_registration_factory.h" #include "oneflow/core/common/nd_index_offset_helper.h" namespace oneflow { namespace { template<int32_t NDIMS, typename I> struct SOA { I val[NDIMS]; }; template<int32_t NDIMS, typename T, typename I> __global__ void CopyNDGpu(const int n, T* dst, const T* src, NdIndexOffsetHelper<I, NDIMS> dst_helper, NdIndexOffsetHelper<I, NDIMS> src_helper, NdIndexOffsetHelper<I, NDIMS> copy_helper, SOA<NDIMS, I> dst_pos, SOA<NDIMS, I> src_pos) { CUDA_1D_KERNEL_LOOP_T(I, i, n) { I copy_idx[NDIMS]; I src_idx[NDIMS]; I dst_idx[NDIMS]; copy_helper.OffsetToNdIndex(i, copy_idx); #pragma unroll for (I j = 0; j < NDIMS; j++) { src_idx[j] = src_pos.val[j] + copy_idx[j]; dst_idx[j] = dst_pos.val[j] + copy_idx[j]; } const I src_offset = src_helper.NdIndexToOffset(src_idx); const I dst_offset = dst_helper.NdIndexToOffset(dst_idx); dst[dst_offset] = src[src_offset]; } } size_t GetPackSize(const MemoryCopyNdDesc& desc, const void* dst, const void* src) { const int64_t mask = desc.src_shape.dim_vec().back() | desc.dst_shape.dim_vec().back() | desc.extent.dim_vec().back() | desc.src_pos.dim_vec().back() | desc.dst_pos.dim_vec().back() | static_cast<int64_t>(reinterpret_cast<uintptr_t>(dst)) | static_cast<int64_t>(reinterpret_cast<uintptr_t>(src)); if ((mask & 0xF) == 0) { return 16; } else if ((mask & 0x7) == 0) { 
return 8; } else if ((mask & 0x3) == 0) { return 4; } else if ((mask & 0x1) == 0) { return 2; } else { return 1; } } } // namespace template<int32_t NDIMS, typename P, typename I> void CopyNDByPackByIndexTypeGpu(DeviceCtx* ctx, void* dst, const void* src, const MemoryCopyNdDesc& desc) { CHECK_EQ(desc.dst_pos.NumAxes(), NDIMS); CHECK_EQ(desc.src_pos.NumAxes(), NDIMS); CHECK_EQ(desc.dst_shape.NumAxes(), NDIMS); CHECK_EQ(desc.src_shape.NumAxes(), NDIMS); CHECK_EQ(desc.extent.NumAxes(), NDIMS); constexpr size_t pack_size = sizeof(P); I dst_shape_dim_arr[NDIMS]; I src_shape_dim_arr[NDIMS]; I extent_dim_arr[NDIMS]; SOA<NDIMS, I> src_pos; SOA<NDIMS, I> dst_pos; FOR_RANGE(int64_t, i, 0, NDIMS) { if (i == NDIMS - 1) { dst_pos.val[i] = desc.dst_pos.dim_vec().at(i) / pack_size; src_pos.val[i] = desc.src_pos.dim_vec().at(i) / pack_size; dst_shape_dim_arr[i] = desc.dst_shape.dim_vec().at(i) / pack_size; src_shape_dim_arr[i] = desc.src_shape.dim_vec().at(i) / pack_size; extent_dim_arr[i] = desc.extent.dim_vec().at(i) / pack_size; } else { dst_pos.val[i] = desc.dst_pos.dim_vec().at(i); src_pos.val[i] = desc.src_pos.dim_vec().at(i); dst_shape_dim_arr[i] = desc.dst_shape.dim_vec().at(i); src_shape_dim_arr[i] = desc.src_shape.dim_vec().at(i); extent_dim_arr[i] = desc.extent.dim_vec().at(i); } } NdIndexOffsetHelper<I, NDIMS> dst_helper(dst_shape_dim_arr); NdIndexOffsetHelper<I, NDIMS> src_helper(src_shape_dim_arr); NdIndexOffsetHelper<I, NDIMS> copy_helper(extent_dim_arr); const int64_t elem_cnt = desc.extent.elem_cnt() / pack_size; CopyNDGpu<NDIMS, P, I> <<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( elem_cnt, reinterpret_cast<P*>(dst), reinterpret_cast<const P*>(src), dst_helper, src_helper, copy_helper, dst_pos, src_pos); } template<int32_t NDIMS, typename P> void CopyNDByPackGpu(DeviceCtx* ctx, void* dst, const void* src, const MemoryCopyNdDesc& desc) { if (std::max(desc.dst_shape.elem_cnt(), desc.src_shape.elem_cnt()) > 
static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) { CopyNDByPackByIndexTypeGpu<NDIMS, P, int64_t>(ctx, dst, src, desc); } else { CopyNDByPackByIndexTypeGpu<NDIMS, P, int32_t>(ctx, dst, src, desc); } } template<int32_t NDIMS> void CopyNDGpuImpl(DeviceCtx* ctx, void* dst, const void* src, const MemoryCopyNdDesc& desc) { const size_t pack_size = GetPackSize(desc, dst, src); if (pack_size == 1) { CopyNDByPackGpu<NDIMS, uint8_t>(ctx, dst, src, desc); } else if (pack_size == 2) { CopyNDByPackGpu<NDIMS, uint16_t>(ctx, dst, src, desc); } else if (pack_size == 4) { CopyNDByPackGpu<NDIMS, uint32_t>(ctx, dst, src, desc); } else if (pack_size == 8) { CopyNDByPackGpu<NDIMS, uint64_t>(ctx, dst, src, desc); } else if (pack_size == 16) { static_assert(sizeof(uint4) == 16, ""); CopyNDByPackGpu<NDIMS, uint4>(ctx, dst, src, desc); } else { UNIMPLEMENTED(); } } #define SPECIALIZE_COPY_ND_GPU_IMPL(NDIMS) \ template void CopyNDGpuImpl<NDIMS>(DeviceCtx * ctx, void* dst, const void* src, \ const MemoryCopyNdDesc& desc); SPECIALIZE_COPY_ND_GPU_IMPL(2) SPECIALIZE_COPY_ND_GPU_IMPL(3) SPECIALIZE_COPY_ND_GPU_IMPL(4) SPECIALIZE_COPY_ND_GPU_IMPL(5) SPECIALIZE_COPY_ND_GPU_IMPL(6) } // namespace oneflow
a401ed8499f9cf944bc2a4178a7618aa679a95d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if defined(PARTICLES) && defined(PARTICLES_GPU) #include <math.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include "../global/global.h" #include "../global/global_cuda.h" #include "../grid/grid3D.h" #include "../io/io.h" #include "../utils/gpu.hpp" #include "particles_3D.h" #ifdef COSMOLOGY #include "../cosmology/cosmology.h" // #include "../cosmology/cosmology_functions_gpu.h" // FUTURE FIX: The Hubble function was defined here because I couldn't get it // form other file, tried -dc flag when compiling buu paris broke. __device__ Real Get_Hubble_Parameter_dev(Real a, Real H0, Real Omega_M, Real Omega_L, Real Omega_K) { Real a2 = a * a; Real a3 = a2 * a; Real factor = (Omega_M / a3 + Omega_K / a2 + Omega_L); return H0 * sqrt(factor); } #endif __global__ void Calc_Particles_dti_Kernel(part_int_t n_local, Real dx, Real dy, Real dz, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *dti_array) { __shared__ Real max_dti[TPB_PARTICLES]; part_int_t id; int tid; // get a global thread ID id = blockIdx.x * blockDim.x + threadIdx.x; // and a thread id within the block tid = threadIdx.x; // set shared memory to 0 max_dti[tid] = 0; __syncthreads(); Real vx, vy, vz; // if( tid == 0 ) printf("%f %f %f \n", dx, dy, dz ); // threads corresponding to real cells do the calculation if (id < n_local) { // every thread collects the variables it needs from global memory vx = vel_x_dev[id]; vy = vel_y_dev[id]; vz = vel_z_dev[id]; max_dti[tid] = fmax(fabs(vx) / dx, fabs(vy) / dy); max_dti[tid] = fmax(max_dti[tid], fabs(vz) / dz); max_dti[tid] = fmax(max_dti[tid], 0.0); } __syncthreads(); // do the reduction in shared memory (find the max inverse timestep in the // block) for (unsigned int s = 1; s < blockDim.x; s *= 2) { if (tid % (2 * s) == 0) { max_dti[tid] = fmax(max_dti[tid], max_dti[tid + s]); } __syncthreads(); } // write the result for this block to global memory if 
(tid == 0) { dti_array[blockIdx.x] = max_dti[0]; } } Real Particles_3D::Calc_Particles_dt_GPU_function(int ngrid, part_int_t n_particles_local, Real dx, Real dy, Real dz, Real *vel_x, Real *vel_y, Real *vel_z, Real *dti_array_host, Real *dti_array_dev) { // // set values for GPU kernels // int ngrid = (Particles.n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // printf("%f %f %f \n", dx, dy, dz); // Only runs if there are local particles if (ngrid == 0) { return 0; } hipLaunchKernelGGL(Calc_Particles_dti_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_particles_local, dx, dy, dz, vel_x, vel_y, vel_z, dti_array_dev); CudaCheckError(); // Initialize dt values Real max_dti = 0; // copy the dti array onto the CPU CudaSafeCall(hipMemcpy(dti_array_host, dti_array_dev, ngrid * sizeof(Real), hipMemcpyDeviceToHost)); // find maximum inverse timestep from CFL condition for (int i = 0; i < ngrid; i++) { max_dti = fmax(max_dti, dti_array_host[i]); } return max_dti; } __global__ void Advance_Particles_KDK_Step1_Kernel(part_int_t n_local, Real dt, Real *pos_x_dev, Real *pos_y_dev, Real *pos_z_dev, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev) { part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= n_local) { return; } // Advance velocities by half a step vel_x_dev[tid] += 0.5 * dt * grav_x_dev[tid]; vel_y_dev[tid] += 0.5 * dt * grav_y_dev[tid]; vel_z_dev[tid] += 0.5 * dt * grav_z_dev[tid]; // Advance Positions using advanced velocities pos_x_dev[tid] += dt * vel_x_dev[tid]; pos_y_dev[tid] += dt * vel_y_dev[tid]; pos_z_dev[tid] += dt * vel_z_dev[tid]; } __global__ void Advance_Particles_KDK_Step2_Kernel(part_int_t n_local, Real dt, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev) { part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid 
>= n_local) { return; } // Advance velocities by the second half a step vel_x_dev[tid] += 0.5 * dt * grav_x_dev[tid]; vel_y_dev[tid] += 0.5 * dt * grav_y_dev[tid]; vel_z_dev[tid] += 0.5 * dt * grav_z_dev[tid]; } void Particles_3D::Advance_Particles_KDK_Step1_GPU_function(part_int_t n_local, Real dt, Real *pos_x_dev, Real *pos_y_dev, Real *pos_z_dev, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev) { // set values for GPU kernels int ngrid = (n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Only runs if there are local particles if (n_local > 0) { hipLaunchKernelGGL(Advance_Particles_KDK_Step1_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, dt, pos_x_dev, pos_y_dev, pos_z_dev, vel_x_dev, vel_y_dev, vel_z_dev, grav_x_dev, grav_y_dev, grav_z_dev); CudaCheckError(); } } void Particles_3D::Advance_Particles_KDK_Step2_GPU_function(part_int_t n_local, Real dt, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev) { // set values for GPU kernels int ngrid = (n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Only runs if there are local particles if (n_local > 0) { hipLaunchKernelGGL(Advance_Particles_KDK_Step2_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, dt, vel_x_dev, vel_y_dev, vel_z_dev, grav_x_dev, grav_y_dev, grav_z_dev); CudaCheckError(); } } #ifdef COSMOLOGY __global__ void Advance_Particles_KDK_Step1_Cosmo_Kernel(part_int_t n_local, Real da, Real *pos_x_dev, Real *pos_y_dev, Real *pos_z_dev, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev, Real current_a, Real H0, Real cosmo_h, Real Omega_M, Real Omega_L, Real Omega_K) { part_int_t tid = blockIdx.x * blockDim.x + 
threadIdx.x; if (tid >= n_local) { return; } Real vel_x, vel_y, vel_z; vel_x = vel_x_dev[tid]; vel_y = vel_y_dev[tid]; vel_z = vel_z_dev[tid]; Real da_half, a_half, H, H_half, dt, dt_half; da_half = da / 2; a_half = current_a + da_half; H = Get_Hubble_Parameter_dev(current_a, H0, Omega_M, Omega_L, Omega_K); H_half = Get_Hubble_Parameter_dev(a_half, H0, Omega_M, Omega_L, Omega_K); dt = da / (current_a * H) * cosmo_h; dt_half = da / (a_half * H_half) * cosmo_h / (a_half); // if ( tid == 0 ) printf( "dt: %f\n", dt); // if ( tid == 0 ) printf( "pos_x: %f\n", pos_x_dev[tid]); // if ( tid == 0 ) printf( "vel_x: %f\n", vel_x_dev[tid]); // if ( tid == 0 ) printf( "grav_x: %f\n", grav_x_dev[tid]); // Advance velocities by half a step vel_x = (current_a * vel_x + 0.5 * dt * grav_x_dev[tid]) / a_half; vel_y = (current_a * vel_y + 0.5 * dt * grav_y_dev[tid]) / a_half; vel_z = (current_a * vel_z + 0.5 * dt * grav_z_dev[tid]) / a_half; vel_x_dev[tid] = vel_x; vel_y_dev[tid] = vel_y; vel_z_dev[tid] = vel_z; // Advance Positions using advanced velocities pos_x_dev[tid] += dt_half * vel_x; pos_y_dev[tid] += dt_half * vel_y; pos_z_dev[tid] += dt_half * vel_z; } __global__ void Advance_Particles_KDK_Step2_Cosmo_Kernel(part_int_t n_local, Real da, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev, Real current_a, Real H0, Real cosmo_h, Real Omega_M, Real Omega_L, Real Omega_K) { part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= n_local) { return; } Real vel_x, vel_y, vel_z; vel_x = vel_x_dev[tid]; vel_y = vel_y_dev[tid]; vel_z = vel_z_dev[tid]; Real da_half, a_half, dt; da_half = da / 2; a_half = current_a - da_half; dt = da / (current_a * Get_Hubble_Parameter_dev(current_a, H0, Omega_M, Omega_L, Omega_K)) * cosmo_h; // Advance velocities by the second half a step vel_x_dev[tid] = (a_half * vel_x + 0.5 * dt * grav_x_dev[tid]) / current_a; vel_y_dev[tid] = (a_half * vel_y + 0.5 * dt * grav_y_dev[tid]) / current_a; 
vel_z_dev[tid] = (a_half * vel_z + 0.5 * dt * grav_z_dev[tid]) / current_a; } void Particles_3D::Advance_Particles_KDK_Step1_Cosmo_GPU_function(part_int_t n_local, Real delta_a, Real *pos_x_dev, Real *pos_y_dev, Real *pos_z_dev, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev, Real current_a, Real H0, Real cosmo_h, Real Omega_M, Real Omega_L, Real Omega_K) { // set values for GPU kernels int ngrid = (n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Only runs if there are local particles if (n_local > 0) { hipLaunchKernelGGL(Advance_Particles_KDK_Step1_Cosmo_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, delta_a, pos_x_dev, pos_y_dev, pos_z_dev, vel_x_dev, vel_y_dev, vel_z_dev, grav_x_dev, grav_y_dev, grav_z_dev, current_a, H0, cosmo_h, Omega_M, Omega_L, Omega_K); CHECK(hipDeviceSynchronize()); // CudaCheckError(); } } void Particles_3D::Advance_Particles_KDK_Step2_Cosmo_GPU_function(part_int_t n_local, Real delta_a, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev, Real current_a, Real H0, Real cosmo_h, Real Omega_M, Real Omega_L, Real Omega_K) { // set values for GPU kernels int ngrid = (n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Only runs if there are local particles if (n_local > 0) { hipLaunchKernelGGL(Advance_Particles_KDK_Step2_Cosmo_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, delta_a, vel_x_dev, vel_y_dev, vel_z_dev, grav_x_dev, grav_y_dev, grav_z_dev, current_a, H0, cosmo_h, Omega_M, Omega_L, Omega_K); CHECK(hipDeviceSynchronize()); // CudaCheckError(); } } #endif // COSMOLOGY #endif
a401ed8499f9cf944bc2a4178a7618aa679a95d8.cu
#if defined(PARTICLES) && defined(PARTICLES_GPU) #include <math.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include "../global/global.h" #include "../global/global_cuda.h" #include "../grid/grid3D.h" #include "../io/io.h" #include "../utils/gpu.hpp" #include "particles_3D.h" #ifdef COSMOLOGY #include "../cosmology/cosmology.h" // #include "../cosmology/cosmology_functions_gpu.h" // FUTURE FIX: The Hubble function was defined here because I couldn't get it // form other file, tried -dc flag when compiling buu paris broke. __device__ Real Get_Hubble_Parameter_dev(Real a, Real H0, Real Omega_M, Real Omega_L, Real Omega_K) { Real a2 = a * a; Real a3 = a2 * a; Real factor = (Omega_M / a3 + Omega_K / a2 + Omega_L); return H0 * sqrt(factor); } #endif __global__ void Calc_Particles_dti_Kernel(part_int_t n_local, Real dx, Real dy, Real dz, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *dti_array) { __shared__ Real max_dti[TPB_PARTICLES]; part_int_t id; int tid; // get a global thread ID id = blockIdx.x * blockDim.x + threadIdx.x; // and a thread id within the block tid = threadIdx.x; // set shared memory to 0 max_dti[tid] = 0; __syncthreads(); Real vx, vy, vz; // if( tid == 0 ) printf("%f %f %f \n", dx, dy, dz ); // threads corresponding to real cells do the calculation if (id < n_local) { // every thread collects the variables it needs from global memory vx = vel_x_dev[id]; vy = vel_y_dev[id]; vz = vel_z_dev[id]; max_dti[tid] = fmax(fabs(vx) / dx, fabs(vy) / dy); max_dti[tid] = fmax(max_dti[tid], fabs(vz) / dz); max_dti[tid] = fmax(max_dti[tid], 0.0); } __syncthreads(); // do the reduction in shared memory (find the max inverse timestep in the // block) for (unsigned int s = 1; s < blockDim.x; s *= 2) { if (tid % (2 * s) == 0) { max_dti[tid] = fmax(max_dti[tid], max_dti[tid + s]); } __syncthreads(); } // write the result for this block to global memory if (tid == 0) { dti_array[blockIdx.x] = max_dti[0]; } } Real 
Particles_3D::Calc_Particles_dt_GPU_function(int ngrid, part_int_t n_particles_local, Real dx, Real dy, Real dz, Real *vel_x, Real *vel_y, Real *vel_z, Real *dti_array_host, Real *dti_array_dev) { // // set values for GPU kernels // int ngrid = (Particles.n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // printf("%f %f %f \n", dx, dy, dz); // Only runs if there are local particles if (ngrid == 0) { return 0; } hipLaunchKernelGGL(Calc_Particles_dti_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_particles_local, dx, dy, dz, vel_x, vel_y, vel_z, dti_array_dev); CudaCheckError(); // Initialize dt values Real max_dti = 0; // copy the dti array onto the CPU CudaSafeCall(cudaMemcpy(dti_array_host, dti_array_dev, ngrid * sizeof(Real), cudaMemcpyDeviceToHost)); // find maximum inverse timestep from CFL condition for (int i = 0; i < ngrid; i++) { max_dti = fmax(max_dti, dti_array_host[i]); } return max_dti; } __global__ void Advance_Particles_KDK_Step1_Kernel(part_int_t n_local, Real dt, Real *pos_x_dev, Real *pos_y_dev, Real *pos_z_dev, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev) { part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= n_local) { return; } // Advance velocities by half a step vel_x_dev[tid] += 0.5 * dt * grav_x_dev[tid]; vel_y_dev[tid] += 0.5 * dt * grav_y_dev[tid]; vel_z_dev[tid] += 0.5 * dt * grav_z_dev[tid]; // Advance Positions using advanced velocities pos_x_dev[tid] += dt * vel_x_dev[tid]; pos_y_dev[tid] += dt * vel_y_dev[tid]; pos_z_dev[tid] += dt * vel_z_dev[tid]; } __global__ void Advance_Particles_KDK_Step2_Kernel(part_int_t n_local, Real dt, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev) { part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= n_local) { return; } // Advance velocities by the 
second half a step vel_x_dev[tid] += 0.5 * dt * grav_x_dev[tid]; vel_y_dev[tid] += 0.5 * dt * grav_y_dev[tid]; vel_z_dev[tid] += 0.5 * dt * grav_z_dev[tid]; } void Particles_3D::Advance_Particles_KDK_Step1_GPU_function(part_int_t n_local, Real dt, Real *pos_x_dev, Real *pos_y_dev, Real *pos_z_dev, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev) { // set values for GPU kernels int ngrid = (n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Only runs if there are local particles if (n_local > 0) { hipLaunchKernelGGL(Advance_Particles_KDK_Step1_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, dt, pos_x_dev, pos_y_dev, pos_z_dev, vel_x_dev, vel_y_dev, vel_z_dev, grav_x_dev, grav_y_dev, grav_z_dev); CudaCheckError(); } } void Particles_3D::Advance_Particles_KDK_Step2_GPU_function(part_int_t n_local, Real dt, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev) { // set values for GPU kernels int ngrid = (n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Only runs if there are local particles if (n_local > 0) { hipLaunchKernelGGL(Advance_Particles_KDK_Step2_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, dt, vel_x_dev, vel_y_dev, vel_z_dev, grav_x_dev, grav_y_dev, grav_z_dev); CudaCheckError(); } } #ifdef COSMOLOGY __global__ void Advance_Particles_KDK_Step1_Cosmo_Kernel(part_int_t n_local, Real da, Real *pos_x_dev, Real *pos_y_dev, Real *pos_z_dev, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev, Real current_a, Real H0, Real cosmo_h, Real Omega_M, Real Omega_L, Real Omega_K) { part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= n_local) { return; } Real vel_x, 
vel_y, vel_z; vel_x = vel_x_dev[tid]; vel_y = vel_y_dev[tid]; vel_z = vel_z_dev[tid]; Real da_half, a_half, H, H_half, dt, dt_half; da_half = da / 2; a_half = current_a + da_half; H = Get_Hubble_Parameter_dev(current_a, H0, Omega_M, Omega_L, Omega_K); H_half = Get_Hubble_Parameter_dev(a_half, H0, Omega_M, Omega_L, Omega_K); dt = da / (current_a * H) * cosmo_h; dt_half = da / (a_half * H_half) * cosmo_h / (a_half); // if ( tid == 0 ) printf( "dt: %f\n", dt); // if ( tid == 0 ) printf( "pos_x: %f\n", pos_x_dev[tid]); // if ( tid == 0 ) printf( "vel_x: %f\n", vel_x_dev[tid]); // if ( tid == 0 ) printf( "grav_x: %f\n", grav_x_dev[tid]); // Advance velocities by half a step vel_x = (current_a * vel_x + 0.5 * dt * grav_x_dev[tid]) / a_half; vel_y = (current_a * vel_y + 0.5 * dt * grav_y_dev[tid]) / a_half; vel_z = (current_a * vel_z + 0.5 * dt * grav_z_dev[tid]) / a_half; vel_x_dev[tid] = vel_x; vel_y_dev[tid] = vel_y; vel_z_dev[tid] = vel_z; // Advance Positions using advanced velocities pos_x_dev[tid] += dt_half * vel_x; pos_y_dev[tid] += dt_half * vel_y; pos_z_dev[tid] += dt_half * vel_z; } __global__ void Advance_Particles_KDK_Step2_Cosmo_Kernel(part_int_t n_local, Real da, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev, Real current_a, Real H0, Real cosmo_h, Real Omega_M, Real Omega_L, Real Omega_K) { part_int_t tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= n_local) { return; } Real vel_x, vel_y, vel_z; vel_x = vel_x_dev[tid]; vel_y = vel_y_dev[tid]; vel_z = vel_z_dev[tid]; Real da_half, a_half, dt; da_half = da / 2; a_half = current_a - da_half; dt = da / (current_a * Get_Hubble_Parameter_dev(current_a, H0, Omega_M, Omega_L, Omega_K)) * cosmo_h; // Advance velocities by the second half a step vel_x_dev[tid] = (a_half * vel_x + 0.5 * dt * grav_x_dev[tid]) / current_a; vel_y_dev[tid] = (a_half * vel_y + 0.5 * dt * grav_y_dev[tid]) / current_a; vel_z_dev[tid] = (a_half * vel_z + 0.5 * dt * 
grav_z_dev[tid]) / current_a; } void Particles_3D::Advance_Particles_KDK_Step1_Cosmo_GPU_function(part_int_t n_local, Real delta_a, Real *pos_x_dev, Real *pos_y_dev, Real *pos_z_dev, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev, Real current_a, Real H0, Real cosmo_h, Real Omega_M, Real Omega_L, Real Omega_K) { // set values for GPU kernels int ngrid = (n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Only runs if there are local particles if (n_local > 0) { hipLaunchKernelGGL(Advance_Particles_KDK_Step1_Cosmo_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, delta_a, pos_x_dev, pos_y_dev, pos_z_dev, vel_x_dev, vel_y_dev, vel_z_dev, grav_x_dev, grav_y_dev, grav_z_dev, current_a, H0, cosmo_h, Omega_M, Omega_L, Omega_K); CHECK(cudaDeviceSynchronize()); // CudaCheckError(); } } void Particles_3D::Advance_Particles_KDK_Step2_Cosmo_GPU_function(part_int_t n_local, Real delta_a, Real *vel_x_dev, Real *vel_y_dev, Real *vel_z_dev, Real *grav_x_dev, Real *grav_y_dev, Real *grav_z_dev, Real current_a, Real H0, Real cosmo_h, Real Omega_M, Real Omega_L, Real Omega_K) { // set values for GPU kernels int ngrid = (n_local - 1) / TPB_PARTICLES + 1; // number of blocks per 1D grid dim3 dim1dGrid(ngrid, 1, 1); // number of threads per 1D block dim3 dim1dBlock(TPB_PARTICLES, 1, 1); // Only runs if there are local particles if (n_local > 0) { hipLaunchKernelGGL(Advance_Particles_KDK_Step2_Cosmo_Kernel, dim1dGrid, dim1dBlock, 0, 0, n_local, delta_a, vel_x_dev, vel_y_dev, vel_z_dev, grav_x_dev, grav_y_dev, grav_z_dev, current_a, H0, cosmo_h, Omega_M, Omega_L, Omega_K); CHECK(cudaDeviceSynchronize()); // CudaCheckError(); } } #endif // COSMOLOGY #endif
9bf53de40dc634d0e331ac39c2042398b3856ef7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <functions/sigmoid.cuh> #include "test_utils.h" namespace MLCommon { namespace Functions { template <typename T> struct SigmoidInputs { T tolerance; int len; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SigmoidInputs<T>& dims) { return os; } template <typename T> class SigmoidTest : public ::testing::TestWithParam<SigmoidInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SigmoidInputs<T>>::GetParam(); int len = params.len; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); allocate(data, len); T data_h[params.len] = {2.1, -4.5, -0.34, 10.0}; updateDevice(data, data_h, len, stream); allocate(result, len); allocate(result_ref, len); T result_ref_h[params.len] = {0.89090318, 0.01098694, 0.41580948, 0.9999546}; updateDevice(result_ref, result_ref_h, len, stream); sigmoid(result, data, len, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(result)); CUDA_CHECK(hipFree(result_ref)); } protected: SigmoidInputs<T> params; T *data, *result, *result_ref; }; const std::vector<SigmoidInputs<float>> inputsf2 = {{0.001f, 4}}; const std::vector<SigmoidInputs<double>> inputsd2 = {{0.001, 4}}; typedef 
SigmoidTest<float> SigmoidTestValF; TEST_P(SigmoidTestValF, Result) { ASSERT_TRUE(devArrMatch(result_ref, result, params.len, CompareApproxAbs<float>(params.tolerance))); } typedef SigmoidTest<double> SigmoidTestValD; TEST_P(SigmoidTestValD, Result) { ASSERT_TRUE(devArrMatch(result_ref, result, params.len, CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValD, ::testing::ValuesIn(inputsd2)); } // end namespace Functions } // end namespace MLCommon
9bf53de40dc634d0e331ac39c2042398b3856ef7.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <functions/sigmoid.cuh> #include "test_utils.h" namespace MLCommon { namespace Functions { template <typename T> struct SigmoidInputs { T tolerance; int len; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SigmoidInputs<T>& dims) { return os; } template <typename T> class SigmoidTest : public ::testing::TestWithParam<SigmoidInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SigmoidInputs<T>>::GetParam(); int len = params.len; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); allocate(data, len); T data_h[params.len] = {2.1, -4.5, -0.34, 10.0}; updateDevice(data, data_h, len, stream); allocate(result, len); allocate(result_ref, len); T result_ref_h[params.len] = {0.89090318, 0.01098694, 0.41580948, 0.9999546}; updateDevice(result_ref, result_ref_h, len, stream); sigmoid(result, data, len, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(result)); CUDA_CHECK(cudaFree(result_ref)); } protected: SigmoidInputs<T> params; T *data, *result, *result_ref; }; const std::vector<SigmoidInputs<float>> inputsf2 = {{0.001f, 4}}; const std::vector<SigmoidInputs<double>> inputsd2 = {{0.001, 4}}; typedef SigmoidTest<float> SigmoidTestValF; TEST_P(SigmoidTestValF, 
Result) { ASSERT_TRUE(devArrMatch(result_ref, result, params.len, CompareApproxAbs<float>(params.tolerance))); } typedef SigmoidTest<double> SigmoidTestValD; TEST_P(SigmoidTestValD, Result) { ASSERT_TRUE(devArrMatch(result_ref, result, params.len, CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValD, ::testing::ValuesIn(inputsd2)); } // end namespace Functions } // end namespace MLCommon
e23b46901369f756b02309875513f66e2465f21b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void sumArrays(float *A, float *B, float *C, const int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { for (int i = 0; i < N; ++i) { C[idx] = A[idx] + B[idx]; } } }
e23b46901369f756b02309875513f66e2465f21b.cu
#include "includes.h" __global__ void sumArrays(float *A, float *B, float *C, const int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { for (int i = 0; i < N; ++i) { C[idx] = A[idx] + B[idx]; } } }
c759d92f34aa618b556d3153711c515bece4e1ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Template for Assignment 1: CUDA // Use "icc -O -openmp" to compile #include <unistd.h> #include <stdio.h> #include <sys/time.h> #define threshold 1e-4 #define n (2048) void init(void); void ref(void); void test(void); void compare(int N, double *wref, double *w); __global__ void test_kernel(int N, double *A, double *B, double *X); double rtclock(void); double a[n][n],b[n][n],x[n][n],xref[n][n]; int main(){ double clkbegin, clkend, t; printf("Matrix Size = %d\n",n); init(); clkbegin = rtclock(); ref(); clkend = rtclock(); t = clkend-clkbegin; printf("Mult-Tri-Solve-Seq: Approx GFLOPS: %.1f ; Time = %.3f sec; xref[n/2][n/2-1] = %f; \n", 1.0*n*n*n/t/1e9,t,xref[n/2][n/2-1]); clkbegin = rtclock(); test(); clkend = rtclock(); t = clkend-clkbegin; printf("Multi-Tri-Solve-GPU: Approx GFLOPS: %.1f ; Time = %.3f sec; x[n/2][n/2-1] = %f; \n", 1.0*n*n*n/t/1e9,t,x[n/2][n/2-1]); compare(n, (double *) x,(double *) xref); } __global__ void test_kernel(int N, double *A, double *B, double *X) { int i,j,k; double temp; // Template version uses only one thread, which does all the work // This must be changed (and the launch parameters) to exploit GPU parallelism // You can make any changes; only requirement is that correctness test passes if(threadIdx.x == 0) { for(k=0;k<n;k++){ for (i=0;i<n;i++) { temp = B[k*N+i]; // temp = b[k][i]; for (j=0;j<i;j++) temp = temp - A[i*N+j] * X[k*N+j]; // temp = temp - a[i][j]*x[k][j]; X[k*N+i] = temp/A[i*N+i]; //x[k][i] = temp/a[i][i]; } } } } void test(void) { double *Ad,*Bd,*Xd; int size; size = sizeof(double)*n*n; hipMalloc((void **) &Ad,size); hipMalloc((void **) &Bd,size); hipMalloc((void **) &Xd,size); hipMemcpy(Ad,a,size,hipMemcpyHostToDevice); hipMemcpy(Bd,b,size,hipMemcpyHostToDevice); hipLaunchKernelGGL(( test_kernel), dim3(1),dim3(1), 0, 0, n,Ad,Bd,Xd); hipMemcpy(x,Xd,size,hipMemcpyDeviceToHost); hipFree(Ad); hipFree(Bd); hipFree(Xd); } void 
ref(void) { int i,j,k; double temp; for(k=0;k<n;k++){ for (i=0;i<n;i++) { temp = b[k][i]; for (j=0;j<i;j++) temp = temp - a[i][j]*xref[k][j]; xref[k][i] = temp/a[i][i]; } } } void init(void) { int i,j,k; for(k=0;k<n;k++) // for(i=0;i<n;i++) { x[k][i] = k+i; a[k][i] = 1.0 + rand();} for(i=0;i<n;i++) { x[k][i] = k+i; a[k][i] = 1.0*(k+i+1)/(n+1);} for(k=0;k<n;k++) for(i=0;i<n;i++) { b[k][i]=0; for(j=0;j<=i;j++) b[k][i] += a[i][j]*x[k][j]; } for(i=0;i<n;i++) for (j=0;j<n;j++) { x[i][j] = 0.0; xref[i][j] = 0.0; } } void compare(int N, double *wref, double *w) { double maxdiff,this_diff; int numdiffs; int i,j; numdiffs = 0; maxdiff = 0; for (i=0;i<N;i++) for (j=0;j<N;j++) { this_diff = wref[i*N+j]-w[i*N+j]; if (this_diff < 0) this_diff = -1.0*this_diff; if (this_diff>threshold) { numdiffs++; if (this_diff > maxdiff) maxdiff=this_diff; } } if (numdiffs > 0) printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff); else printf("No differences found between reference and test versions\n"); } double rtclock(void) { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); }
c759d92f34aa618b556d3153711c515bece4e1ee.cu
// Template for Assignment 1: CUDA // Use "icc -O -openmp" to compile #include <unistd.h> #include <stdio.h> #include <sys/time.h> #define threshold 1e-4 #define n (2048) void init(void); void ref(void); void test(void); void compare(int N, double *wref, double *w); __global__ void test_kernel(int N, double *A, double *B, double *X); double rtclock(void); double a[n][n],b[n][n],x[n][n],xref[n][n]; int main(){ double clkbegin, clkend, t; printf("Matrix Size = %d\n",n); init(); clkbegin = rtclock(); ref(); clkend = rtclock(); t = clkend-clkbegin; printf("Mult-Tri-Solve-Seq: Approx GFLOPS: %.1f ; Time = %.3f sec; xref[n/2][n/2-1] = %f; \n", 1.0*n*n*n/t/1e9,t,xref[n/2][n/2-1]); clkbegin = rtclock(); test(); clkend = rtclock(); t = clkend-clkbegin; printf("Multi-Tri-Solve-GPU: Approx GFLOPS: %.1f ; Time = %.3f sec; x[n/2][n/2-1] = %f; \n", 1.0*n*n*n/t/1e9,t,x[n/2][n/2-1]); compare(n, (double *) x,(double *) xref); } __global__ void test_kernel(int N, double *A, double *B, double *X) { int i,j,k; double temp; // Template version uses only one thread, which does all the work // This must be changed (and the launch parameters) to exploit GPU parallelism // You can make any changes; only requirement is that correctness test passes if(threadIdx.x == 0) { for(k=0;k<n;k++){ for (i=0;i<n;i++) { temp = B[k*N+i]; // temp = b[k][i]; for (j=0;j<i;j++) temp = temp - A[i*N+j] * X[k*N+j]; // temp = temp - a[i][j]*x[k][j]; X[k*N+i] = temp/A[i*N+i]; //x[k][i] = temp/a[i][i]; } } } } void test(void) { double *Ad,*Bd,*Xd; int size; size = sizeof(double)*n*n; cudaMalloc((void **) &Ad,size); cudaMalloc((void **) &Bd,size); cudaMalloc((void **) &Xd,size); cudaMemcpy(Ad,a,size,cudaMemcpyHostToDevice); cudaMemcpy(Bd,b,size,cudaMemcpyHostToDevice); test_kernel<<<1,1>>>(n,Ad,Bd,Xd); cudaMemcpy(x,Xd,size,cudaMemcpyDeviceToHost); cudaFree(Ad); cudaFree(Bd); cudaFree(Xd); } void ref(void) { int i,j,k; double temp; for(k=0;k<n;k++){ for (i=0;i<n;i++) { temp = b[k][i]; for (j=0;j<i;j++) temp = temp - 
a[i][j]*xref[k][j]; xref[k][i] = temp/a[i][i]; } } } void init(void) { int i,j,k; for(k=0;k<n;k++) // for(i=0;i<n;i++) { x[k][i] = k+i; a[k][i] = 1.0 + rand();} for(i=0;i<n;i++) { x[k][i] = k+i; a[k][i] = 1.0*(k+i+1)/(n+1);} for(k=0;k<n;k++) for(i=0;i<n;i++) { b[k][i]=0; for(j=0;j<=i;j++) b[k][i] += a[i][j]*x[k][j]; } for(i=0;i<n;i++) for (j=0;j<n;j++) { x[i][j] = 0.0; xref[i][j] = 0.0; } } void compare(int N, double *wref, double *w) { double maxdiff,this_diff; int numdiffs; int i,j; numdiffs = 0; maxdiff = 0; for (i=0;i<N;i++) for (j=0;j<N;j++) { this_diff = wref[i*N+j]-w[i*N+j]; if (this_diff < 0) this_diff = -1.0*this_diff; if (this_diff>threshold) { numdiffs++; if (this_diff > maxdiff) maxdiff=this_diff; } } if (numdiffs > 0) printf("%d Diffs found over threshold %f; Max Diff = %f\n", numdiffs,threshold,maxdiff); else printf("No differences found between reference and test versions\n"); } double rtclock(void) { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); }
f36d4389fc4c77520b96d0ddf5af426389b6871f.hip
// !!! This is a file automatically generated by hipify!!! #include "voxelizer_demo.h" #include <GL/glut.h> #include "axle/cg/utils.h" #include "axle/cg/model_gl.h" #include "axle/core/options.h" #include "axle/core/utils.h" #include "axle/core/timer.h" #include "axle/core/debug.h" #include "voxel/voxelizer_api.h" namespace voxel { VoxelizerDemo::VoxelizerDemo() : ax::GlutWindow("Voxelizer Demo",10, 10, 800, 600, GLUT_RGBA|GLUT_DOUBLE|GLUT_DEPTH), camera_(glm::vec3(-2.68, 0, 2.55), glm::vec3(0, 0, 0)) { camera_.set_min_dist(0.1); camera_.set_fovy(30.0); camera_.set_velocity(0, 0, 1); method_ = kGL; iters_ = 10; mode_ = kUniform; dim_ = 256; } VoxelizerDemo::~VoxelizerDemo() { } int DrawVoxel(const HVectorInt &vols, const HVectorFloat &delta, const int x_dim, const int y_dim, const int z_dim, const int int_dim) { float t[3] = { 0.f, 0.f, 0.f }; int count = 0; const int *ptr = thrust::raw_pointer_cast(&vols.front()); const int *vol_ptr = ptr; for (int z = 0; z < int_dim; ++z) { t[Y] = 0; for (int y = 0; y < y_dim; ++y) { t[X] = 0; for (int x = 0; x < x_dim; ++x) { int bits = *ptr; t[Z] = delta[Z] * 32 * z; for (int i = 0; i < 32; ++i) { if (bits & (1 << i)) { glPushMatrix(); ax::DrawCube(t[0], t[1], t[2], t[0] + delta[0], t[1] + delta[1], t[2] + delta[2]); glPopMatrix(); } t[Z] += delta[Z]; } ++ptr; t[X] += delta[X]; } t[Y] += delta[Y]; } } //for (int y = 0; y < y_dim; ++y) { // t[2] = 0.f; // for (int z = 0; z < z_dim; ++z) { // t[0] = 0.f; // for (int x = 0; x < x_int_dim; ++x) { // int bits = *ptr; // int bit = 1; // for (int i = 0; i < 32; ++i) { // if (bits & bit) { // glPushMatrix(); // //[x, y, z * 32 + i] // //glTranslatef(t[0], t[1], t[2]); // ax::DrawCube(t[0], t[1], t[2], t[0] + delta[0], // t[1] + delta[1], t[2] + delta[2]); // //glutSolidCube(delta[0]); // glPopMatrix(); // ++count; // } // bit <<= 1; // t[0] += delta[0]; // } // ++ptr; // } // t[2] += delta[2]; // } // t[1] += delta[1]; //} return count; } uint32 CreateVoxelDisplayList(const HVectorInt 
&vols, const HVectorFloat &delta, const HVectorFloat &bbox0, const int x_dim, const int y_dim, const int z_dim, const int int_dim) { uint32 voxel_list = glGenLists(1); if (voxel_list) { glNewList(voxel_list, GL_COMPILE); glPushMatrix(); glTranslatef(bbox0[0], -delta[1] * y_dim * 0.5, bbox0[2]); int count = DrawVoxel(vols, delta, x_dim, y_dim, z_dim, int_dim); /*glPushAttrib(GL_LIGHTING_BIT|GL_POLYGON_BIT|GL_LINE_BIT); glDisable(GL_LIGHTING); glLineWidth(1.5); glColor3f(0, 0, 0); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); DrawVoxel(vols, delta, x_int_dim, y_dim, z_dim); glPopAttrib();*/ glPopMatrix(); ax::Logger::Debug("voxels:", count); glEndList(); } return voxel_list; } void DrawBound(const HVectorFloat &tri_bbox0, const HVectorFloat &tri_bbox1, const int N) { for (int i = 0; i < N; ++i) { ax::DrawCube(tri_bbox0[i], tri_bbox0[i + N], tri_bbox0[i + _2X(N)], tri_bbox1[i], tri_bbox1[i + N], tri_bbox1[i + _2X(N)]); } } uint32 CreateBoundDisplayList(const HVectorFloat &tri_bbox0, const HVectorFloat &tri_bbox1, const int N) { uint32 bbox_list = glGenLists(1); if (bbox_list) { glNewList(bbox_list, GL_COMPILE); /* glEnable(GL_AUTO_NORMAL); */ glColor3f(0, 1, 0); DrawBound(tri_bbox0, tri_bbox1, N); glPushAttrib(GL_LIGHTING_BIT|GL_POLYGON_BIT|GL_LINE_BIT); glDisable(GL_LIGHTING); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); glLineWidth(1.5); glColor3f(0, 0, 0); DrawBound(tri_bbox0, tri_bbox1, N); glPopAttrib(); /*glDisable(GL_AUTO_NORMAL); */ glEndList(); } return bbox_list; } bool VoxelizerDemo::Initialize(const char *model_file, int dim, bool uniform) { glewInit(); ax::Logger::Debug("load model", model_file); V_RET(mesh_ = ax::LoadObj(ax::UserOptions::GetFullModelPath(model_file).c_str())); V_RET(glmesh_ = ax::GLMesh::Create(mesh_)); this->glmesh_->PreProcess(ax::kUseVBO); V_RET(this->dmesh_ = voxel::ConvertFromTriMesh<kDevice>(this->mesh_)); V_RET(this->hmesh_ = voxel::ConvertFromTriMesh<kHost>(this->mesh_)); this->RunTest(); //bbox0 [p0c0 p1c0 ...][p0c1 p1c1 ...][p0c2 
p1c2 ...] //bbox1 [p0c0 p1c0 ...][p0c1 p1c1 ...][p0c2 p1c2 ...] //pi stands for a point of the bounding box correspond to the ith triangle //ci stands for the ith component of the point // !!!!!! a test should be taken to show how the initial value is set return true; } void VoxelizerDemo::RunTest() { this->dmesh_->ComputeTriBBox(); ax::SeqTimer::Begin("bbox"); this->dmesh_->ComputeMeshBBox(); ax::SeqTimer::End(); this->hmesh_->ComputeTriBBox(); this->hmesh_->ComputeMeshBBox(); std::vector<float> bbox0(3), bbox1(3); hipMemcpy(&bbox0[0], thrust::raw_pointer_cast(&this->dmesh_->bbox0().front()), sizeof(float)*3, hipMemcpyDeviceToHost); hipMemcpy(&bbox1[0], thrust::raw_pointer_cast(&this->dmesh_->bbox1().front()), sizeof(float)*3, hipMemcpyDeviceToHost); this->dvoxels_.Initialize(HVectorFloat(this->dmesh_->bbox0()), HVectorFloat(this->dmesh_->bbox1()), dim_, mode_); this->hvoxels_.Initialize(this->hmesh_->bbox0(), this->hmesh_->bbox1(), dim_, mode_); //voxel::Voxelize(thrust::raw_pointer_cast(&this->dmesh_->vertices().front()), // thrust::raw_pointer_cast(&this->dmesh_->triangles().front()), // this->dmesh_->n_triangles(), // thrust::raw_pointer_cast(&this->dmesh_->tri_bbox0().front()), // thrust::raw_pointer_cast(&this->dmesh_->tri_bbox1().front()), // this->dvoxels_); voxel::Voxelize(thrust::raw_pointer_cast(&this->hmesh_->vertices().front()), thrust::raw_pointer_cast(&this->hmesh_->triangles().front()), this->hmesh_->n_triangles(), thrust::raw_pointer_cast(&this->hmesh_->tri_bbox0().front()), thrust::raw_pointer_cast(&this->hmesh_->tri_bbox1().front()), this->hvoxels_); ax::SeqTimer::Begin("voxel"); ::tVoxels tvoxels; tvoxels.data = this->dvoxels_.vols_ptr(); tvoxels.target = voxel::kDevice; int tmp_dim[3] = { 512, 512, 128 }; ::Voxelize(this->mesh_, tmp_dim, &tvoxels); ax::SeqTimer::End(); //voxel::CheckVoxels(this->dvoxels_.vols(), this->hvoxels_.vols()); const HostVoxels &vols = this->hvoxels_; if (voxel_list_) glDeleteLists(voxel_list_, 1); voxel_list_ = 
CreateVoxelDisplayList(HVectorInt(this->hvoxels_.vols()), vols.delta(), vols.bbox0(), vols.dim(X), vols.dim(Y), vols.dim(Z), vols.int_dim()); /* if (bbox_list_) glDeleteLists(bbox_list_, 1); bbox_list_ = CreateBoundDisplayList(h_test_->tri_bbox0(), h_test_->tri_bbox1(), h_test_->n_triangles());*/ } void VoxelizerDemo::OnPaint() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glMatrixMode(GL_MODELVIEW); glm::mat4 m = camera_.ViewMatrix(); glLoadMatrixf(&m[0][0]); GLfloat mat_specular[] = { 1.0, 1.0, 1.0, 1.0 }; GLfloat mat_shininess[] = { 50.0 }; GLfloat mat_diffuse[] = { 0., 0.8, .0, 1.0 }; glm::vec4 light_position(camera_.position(), 0); glClearColor (0.0, 0.0, 0.0, 0.0); // glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular); glMaterialfv(GL_FRONT, GL_DIFFUSE, mat_diffuse); // glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess); glLightfv(GL_LIGHT0, GL_POSITION, &light_position[0]); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); glEnable(GL_DEPTH_TEST); glShadeModel(GL_FLAT); //glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); //glutSolidSphere(0.1, 32, 32); ax::CheckErrorsGL("hello"); if (kGL == method_) { glmesh_->Draw(NULL, ax::kNone); } else if (kBBox == method_) { //glCallList(bbox_list_); } else { glCallList(voxel_list_); } glDisable(GL_LIGHT0); glDisable(GL_LIGHTING); ax::DisplayStatistics("frame rate", fps_counter_.Update(), "fps"); glutSwapBuffers(); } void VoxelizerDemo::OnResize(int width, int height) { if (width == 0 || height == 0) return; glViewport(0, 0, width, height); camera_.set_aspect_ratio(static_cast<double>(width)/height); glMatrixMode(GL_PROJECTION); glm::mat4 m = camera_.ProjMatrix(); glLoadMatrixf(&m[0][0]); } void VoxelizerDemo::OnIdle() { this->RePaint(); } void VoxelizerDemo::OnKeyDown(const int key, const int x, const int y) { switch (key) { case 'a': camera_.Yaw(-1); break; case 'd': camera_.Yaw(1); break; case 'w': camera_.Walk(1); break; case 's': camera_.Walk(-1); break; case 'q': camera_.Pitch(1); break; case 'z': camera_.Pitch(-1); 
break; case 'n': method_ = (method_ + 1) % 3; break; case 'j': dim_ <<= 1; this->RunTest(); break; case 'k': dim_ >>= 1; this->RunTest(); break; case 'h': if (iters_ > 5) { iters_ -= 5; this->RunTest(); } break; case 'l': iters_ += 5; this->RunTest(); break; case 'm': mode_ = (mode_ + 1) % 3; this->RunTest(); break; } } } // voxel
f36d4389fc4c77520b96d0ddf5af426389b6871f.cu
#include "voxelizer_demo.h" #include <GL/glut.h> #include "axle/cg/utils.h" #include "axle/cg/model_gl.h" #include "axle/core/options.h" #include "axle/core/utils.h" #include "axle/core/timer.h" #include "axle/core/debug.h" #include "voxel/voxelizer_api.h" namespace voxel { VoxelizerDemo::VoxelizerDemo() : ax::GlutWindow("Voxelizer Demo",10, 10, 800, 600, GLUT_RGBA|GLUT_DOUBLE|GLUT_DEPTH), camera_(glm::vec3(-2.68, 0, 2.55), glm::vec3(0, 0, 0)) { camera_.set_min_dist(0.1); camera_.set_fovy(30.0); camera_.set_velocity(0, 0, 1); method_ = kGL; iters_ = 10; mode_ = kUniform; dim_ = 256; } VoxelizerDemo::~VoxelizerDemo() { } int DrawVoxel(const HVectorInt &vols, const HVectorFloat &delta, const int x_dim, const int y_dim, const int z_dim, const int int_dim) { float t[3] = { 0.f, 0.f, 0.f }; int count = 0; const int *ptr = thrust::raw_pointer_cast(&vols.front()); const int *vol_ptr = ptr; for (int z = 0; z < int_dim; ++z) { t[Y] = 0; for (int y = 0; y < y_dim; ++y) { t[X] = 0; for (int x = 0; x < x_dim; ++x) { int bits = *ptr; t[Z] = delta[Z] * 32 * z; for (int i = 0; i < 32; ++i) { if (bits & (1 << i)) { glPushMatrix(); ax::DrawCube(t[0], t[1], t[2], t[0] + delta[0], t[1] + delta[1], t[2] + delta[2]); glPopMatrix(); } t[Z] += delta[Z]; } ++ptr; t[X] += delta[X]; } t[Y] += delta[Y]; } } //for (int y = 0; y < y_dim; ++y) { // t[2] = 0.f; // for (int z = 0; z < z_dim; ++z) { // t[0] = 0.f; // for (int x = 0; x < x_int_dim; ++x) { // int bits = *ptr; // int bit = 1; // for (int i = 0; i < 32; ++i) { // if (bits & bit) { // glPushMatrix(); // //[x, y, z * 32 + i] // //glTranslatef(t[0], t[1], t[2]); // ax::DrawCube(t[0], t[1], t[2], t[0] + delta[0], // t[1] + delta[1], t[2] + delta[2]); // //glutSolidCube(delta[0]); // glPopMatrix(); // ++count; // } // bit <<= 1; // t[0] += delta[0]; // } // ++ptr; // } // t[2] += delta[2]; // } // t[1] += delta[1]; //} return count; } uint32 CreateVoxelDisplayList(const HVectorInt &vols, const HVectorFloat &delta, const HVectorFloat 
&bbox0, const int x_dim, const int y_dim, const int z_dim, const int int_dim) { uint32 voxel_list = glGenLists(1); if (voxel_list) { glNewList(voxel_list, GL_COMPILE); glPushMatrix(); glTranslatef(bbox0[0], -delta[1] * y_dim * 0.5, bbox0[2]); int count = DrawVoxel(vols, delta, x_dim, y_dim, z_dim, int_dim); /*glPushAttrib(GL_LIGHTING_BIT|GL_POLYGON_BIT|GL_LINE_BIT); glDisable(GL_LIGHTING); glLineWidth(1.5); glColor3f(0, 0, 0); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); DrawVoxel(vols, delta, x_int_dim, y_dim, z_dim); glPopAttrib();*/ glPopMatrix(); ax::Logger::Debug("voxels:", count); glEndList(); } return voxel_list; } void DrawBound(const HVectorFloat &tri_bbox0, const HVectorFloat &tri_bbox1, const int N) { for (int i = 0; i < N; ++i) { ax::DrawCube(tri_bbox0[i], tri_bbox0[i + N], tri_bbox0[i + _2X(N)], tri_bbox1[i], tri_bbox1[i + N], tri_bbox1[i + _2X(N)]); } } uint32 CreateBoundDisplayList(const HVectorFloat &tri_bbox0, const HVectorFloat &tri_bbox1, const int N) { uint32 bbox_list = glGenLists(1); if (bbox_list) { glNewList(bbox_list, GL_COMPILE); /* glEnable(GL_AUTO_NORMAL); */ glColor3f(0, 1, 0); DrawBound(tri_bbox0, tri_bbox1, N); glPushAttrib(GL_LIGHTING_BIT|GL_POLYGON_BIT|GL_LINE_BIT); glDisable(GL_LIGHTING); glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); glLineWidth(1.5); glColor3f(0, 0, 0); DrawBound(tri_bbox0, tri_bbox1, N); glPopAttrib(); /*glDisable(GL_AUTO_NORMAL); */ glEndList(); } return bbox_list; } bool VoxelizerDemo::Initialize(const char *model_file, int dim, bool uniform) { glewInit(); ax::Logger::Debug("load model", model_file); V_RET(mesh_ = ax::LoadObj(ax::UserOptions::GetFullModelPath(model_file).c_str())); V_RET(glmesh_ = ax::GLMesh::Create(mesh_)); this->glmesh_->PreProcess(ax::kUseVBO); V_RET(this->dmesh_ = voxel::ConvertFromTriMesh<kDevice>(this->mesh_)); V_RET(this->hmesh_ = voxel::ConvertFromTriMesh<kHost>(this->mesh_)); this->RunTest(); //bbox0 [p0c0 p1c0 ...][p0c1 p1c1 ...][p0c2 p1c2 ...] 
//bbox1 [p0c0 p1c0 ...][p0c1 p1c1 ...][p0c2 p1c2 ...] //pi stands for a point of the bounding box correspond to the ith triangle //ci stands for the ith component of the point // !!!!!! a test should be taken to show how the initial value is set return true; } void VoxelizerDemo::RunTest() { this->dmesh_->ComputeTriBBox(); ax::SeqTimer::Begin("bbox"); this->dmesh_->ComputeMeshBBox(); ax::SeqTimer::End(); this->hmesh_->ComputeTriBBox(); this->hmesh_->ComputeMeshBBox(); std::vector<float> bbox0(3), bbox1(3); cudaMemcpy(&bbox0[0], thrust::raw_pointer_cast(&this->dmesh_->bbox0().front()), sizeof(float)*3, cudaMemcpyDeviceToHost); cudaMemcpy(&bbox1[0], thrust::raw_pointer_cast(&this->dmesh_->bbox1().front()), sizeof(float)*3, cudaMemcpyDeviceToHost); this->dvoxels_.Initialize(HVectorFloat(this->dmesh_->bbox0()), HVectorFloat(this->dmesh_->bbox1()), dim_, mode_); this->hvoxels_.Initialize(this->hmesh_->bbox0(), this->hmesh_->bbox1(), dim_, mode_); //voxel::Voxelize(thrust::raw_pointer_cast(&this->dmesh_->vertices().front()), // thrust::raw_pointer_cast(&this->dmesh_->triangles().front()), // this->dmesh_->n_triangles(), // thrust::raw_pointer_cast(&this->dmesh_->tri_bbox0().front()), // thrust::raw_pointer_cast(&this->dmesh_->tri_bbox1().front()), // this->dvoxels_); voxel::Voxelize(thrust::raw_pointer_cast(&this->hmesh_->vertices().front()), thrust::raw_pointer_cast(&this->hmesh_->triangles().front()), this->hmesh_->n_triangles(), thrust::raw_pointer_cast(&this->hmesh_->tri_bbox0().front()), thrust::raw_pointer_cast(&this->hmesh_->tri_bbox1().front()), this->hvoxels_); ax::SeqTimer::Begin("voxel"); ::tVoxels tvoxels; tvoxels.data = this->dvoxels_.vols_ptr(); tvoxels.target = voxel::kDevice; int tmp_dim[3] = { 512, 512, 128 }; ::Voxelize(this->mesh_, tmp_dim, &tvoxels); ax::SeqTimer::End(); //voxel::CheckVoxels(this->dvoxels_.vols(), this->hvoxels_.vols()); const HostVoxels &vols = this->hvoxels_; if (voxel_list_) glDeleteLists(voxel_list_, 1); voxel_list_ = 
CreateVoxelDisplayList(HVectorInt(this->hvoxels_.vols()), vols.delta(), vols.bbox0(), vols.dim(X), vols.dim(Y), vols.dim(Z), vols.int_dim()); /* if (bbox_list_) glDeleteLists(bbox_list_, 1); bbox_list_ = CreateBoundDisplayList(h_test_->tri_bbox0(), h_test_->tri_bbox1(), h_test_->n_triangles());*/ } void VoxelizerDemo::OnPaint() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glMatrixMode(GL_MODELVIEW); glm::mat4 m = camera_.ViewMatrix(); glLoadMatrixf(&m[0][0]); GLfloat mat_specular[] = { 1.0, 1.0, 1.0, 1.0 }; GLfloat mat_shininess[] = { 50.0 }; GLfloat mat_diffuse[] = { 0., 0.8, .0, 1.0 }; glm::vec4 light_position(camera_.position(), 0); glClearColor (0.0, 0.0, 0.0, 0.0); // glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular); glMaterialfv(GL_FRONT, GL_DIFFUSE, mat_diffuse); // glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess); glLightfv(GL_LIGHT0, GL_POSITION, &light_position[0]); glEnable(GL_LIGHTING); glEnable(GL_LIGHT0); glEnable(GL_DEPTH_TEST); glShadeModel(GL_FLAT); //glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); //glutSolidSphere(0.1, 32, 32); ax::CheckErrorsGL("hello"); if (kGL == method_) { glmesh_->Draw(NULL, ax::kNone); } else if (kBBox == method_) { //glCallList(bbox_list_); } else { glCallList(voxel_list_); } glDisable(GL_LIGHT0); glDisable(GL_LIGHTING); ax::DisplayStatistics("frame rate", fps_counter_.Update(), "fps"); glutSwapBuffers(); } void VoxelizerDemo::OnResize(int width, int height) { if (width == 0 || height == 0) return; glViewport(0, 0, width, height); camera_.set_aspect_ratio(static_cast<double>(width)/height); glMatrixMode(GL_PROJECTION); glm::mat4 m = camera_.ProjMatrix(); glLoadMatrixf(&m[0][0]); } void VoxelizerDemo::OnIdle() { this->RePaint(); } void VoxelizerDemo::OnKeyDown(const int key, const int x, const int y) { switch (key) { case 'a': camera_.Yaw(-1); break; case 'd': camera_.Yaw(1); break; case 'w': camera_.Walk(1); break; case 's': camera_.Walk(-1); break; case 'q': camera_.Pitch(1); break; case 'z': camera_.Pitch(-1); 
break; case 'n': method_ = (method_ + 1) % 3; break; case 'j': dim_ <<= 1; this->RunTest(); break; case 'k': dim_ >>= 1; this->RunTest(); break; case 'h': if (iters_ > 5) { iters_ -= 5; this->RunTest(); } break; case 'l': iters_ += 5; this->RunTest(); break; case 'm': mode_ = (mode_ + 1) % 3; this->RunTest(); break; } } } // voxel
aa0e1085923fb667edf8aa3555571725a8de486d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pointwise_hist2.cuh" #include "split_properties_helpers.cuh" #include "compute_point_hist2_loop.cuh" #include "pointwise_hist2_half_byte_template.cuh" #include <hip/hip_cooperative_groups.h> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> using namespace cooperative_groups; namespace NKernel { template <int BlockSize, bool IsFullPass, int M> #if __CUDA_ARCH__ >= 520 __launch_bounds__(BlockSize, 2) #else __launch_bounds__(BlockSize, 1) #endif __global__ void ComputeSplitPropertiesHalfByteImpl( const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex, const float* __restrict__ target, const float* __restrict__ weight, const ui32* __restrict__ indices, const TDataPartition* __restrict__ partition, float* __restrict__ binSums, const int totalFeatureCount) { TPointwisePartOffsetsHelper helper(gridDim.z); helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass); feature += (blockIdx.x / M) * 8; cindex += feature->Offset; fCount = min(fCount - (blockIdx.x / M) * 8, 8); // __shared__ float smem[16 * BlockSize]; using THist = TPointHistHalfByte<BlockSize>; #if __CUDA_ARCH__ > 350 const bool use64BitLoad = IsFullPass; #else const bool use64BitLoad = false; #endif if (use64BitLoad) { #if __CUDA_ARCH__ <= 350 const int OUTER_UNROLL = 2; #else const int OUTER_UNROLL = 1; #endif ComputeHistogram2 < BlockSize, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, smem); } else { #if __CUDA_ARCH__ <= 300 const int INNER_UNROLL = 2; const int OUTER_UNROLL = 2; #elif __CUDA_ARCH__ <= 350 const int INNER_UNROLL = 4; const int OUTER_UNROLL = 2; #else const int INNER_UNROLL = 1; const int OUTER_UNROLL = 1; #endif 
ComputeHistogram < BlockSize, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > ( indices, partition->Offset, partition->Size, target, weight, cindex, smem); } __syncthreads(); const int fid = (threadIdx.x / 32); const int fold = (threadIdx.x / 2) & 15; const int w = threadIdx.x & 1; if (fid < fCount && fold < feature[fid].Folds) { const float result = smem[fold * 16 + 2 * fid + w]; if (abs(result) > 1e-20) { if (M > 1) { atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, result); } else { binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = result; } } } } template <int BlockSize, int BlocksPerFeatureCount> inline void RunComputeHist2HalfByteKernel(const TCFeature* nbFeatures, int nbCount, const ui32* cindex, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, float* binSums, const int binFeatureCount, bool fullPass, TCudaStream stream, dim3 numBlocks) { if (fullPass) { ComputeSplitPropertiesHalfByteImpl < BlockSize, true, BlocksPerFeatureCount > << <numBlocks, BlockSize, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, indices, partition, binSums, binFeatureCount ); } else { ComputeSplitPropertiesHalfByteImpl < BlockSize, false, BlocksPerFeatureCount > << <numBlocks, BlockSize, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, indices, partition, binSums, binFeatureCount); } } void ComputeHist2HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount, const ui32* cindex, const float* target, const float* weight, const ui32* indices, ui32 size, const TDataPartition* partition, ui32 partsCount, ui32 foldCount, bool fullPass, const ui32 histLineSize, float* binSums, TCudaStream stream) { dim3 numBlocks; numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8); const int histCount = fullPass ? 
partsCount : partsCount / 2; numBlocks.y = static_cast<ui32>(histCount); numBlocks.z = foldCount; const int blockSize = 768; const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64); numBlocks.x *= multiplier; if (halfByteFeaturesCount) { #define COMPUTE(k)\ RunComputeHist2HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\ target,\ weight, indices, partition, binSums, histLineSize,\ fullPass,\ stream, numBlocks); if (multiplier == 1) { COMPUTE(1) } else if (multiplier == 2) { COMPUTE(2) } else if (multiplier == 4) { COMPUTE(4) } else if (multiplier == 8) { COMPUTE(8) } else if (multiplier == 16) { COMPUTE(16) } else if (multiplier == 32) { COMPUTE(32) } else if (multiplier == 64) { COMPUTE(64) } else { exit(1); } #undef COMPUTE } } }
aa0e1085923fb667edf8aa3555571725a8de486d.cu
#include "pointwise_hist2.cuh" #include "split_properties_helpers.cuh" #include "compute_point_hist2_loop.cuh" #include "pointwise_hist2_half_byte_template.cuh" #include <cooperative_groups.h> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_lib/kernel/arch.cuh> using namespace cooperative_groups; namespace NKernel { template <int BlockSize, bool IsFullPass, int M> #if __CUDA_ARCH__ >= 520 __launch_bounds__(BlockSize, 2) #else __launch_bounds__(BlockSize, 1) #endif __global__ void ComputeSplitPropertiesHalfByteImpl( const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex, const float* __restrict__ target, const float* __restrict__ weight, const ui32* __restrict__ indices, const TDataPartition* __restrict__ partition, float* __restrict__ binSums, const int totalFeatureCount) { TPointwisePartOffsetsHelper helper(gridDim.z); helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass); feature += (blockIdx.x / M) * 8; cindex += feature->Offset; fCount = min(fCount - (blockIdx.x / M) * 8, 8); // __shared__ float smem[16 * BlockSize]; using THist = TPointHistHalfByte<BlockSize>; #if __CUDA_ARCH__ > 350 const bool use64BitLoad = IsFullPass; #else const bool use64BitLoad = false; #endif if (use64BitLoad) { #if __CUDA_ARCH__ <= 350 const int OUTER_UNROLL = 2; #else const int OUTER_UNROLL = 1; #endif ComputeHistogram2 < BlockSize, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, smem); } else { #if __CUDA_ARCH__ <= 300 const int INNER_UNROLL = 2; const int OUTER_UNROLL = 2; #elif __CUDA_ARCH__ <= 350 const int INNER_UNROLL = 4; const int OUTER_UNROLL = 2; #else const int INNER_UNROLL = 1; const int OUTER_UNROLL = 1; #endif ComputeHistogram < BlockSize, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > ( indices, partition->Offset, 
partition->Size, target, weight, cindex, smem); } __syncthreads(); const int fid = (threadIdx.x / 32); const int fold = (threadIdx.x / 2) & 15; const int w = threadIdx.x & 1; if (fid < fCount && fold < feature[fid].Folds) { const float result = smem[fold * 16 + 2 * fid + w]; if (abs(result) > 1e-20) { if (M > 1) { atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, result); } else { binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = result; } } } } template <int BlockSize, int BlocksPerFeatureCount> inline void RunComputeHist2HalfByteKernel(const TCFeature* nbFeatures, int nbCount, const ui32* cindex, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, float* binSums, const int binFeatureCount, bool fullPass, TCudaStream stream, dim3 numBlocks) { if (fullPass) { ComputeSplitPropertiesHalfByteImpl < BlockSize, true, BlocksPerFeatureCount > << <numBlocks, BlockSize, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, indices, partition, binSums, binFeatureCount ); } else { ComputeSplitPropertiesHalfByteImpl < BlockSize, false, BlocksPerFeatureCount > << <numBlocks, BlockSize, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, indices, partition, binSums, binFeatureCount); } } void ComputeHist2HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount, const ui32* cindex, const float* target, const float* weight, const ui32* indices, ui32 size, const TDataPartition* partition, ui32 partsCount, ui32 foldCount, bool fullPass, const ui32 histLineSize, float* binSums, TCudaStream stream) { dim3 numBlocks; numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8); const int histCount = fullPass ? 
partsCount : partsCount / 2; numBlocks.y = static_cast<ui32>(histCount); numBlocks.z = foldCount; const int blockSize = 768; const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64); numBlocks.x *= multiplier; if (halfByteFeaturesCount) { #define COMPUTE(k)\ RunComputeHist2HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\ target,\ weight, indices, partition, binSums, histLineSize,\ fullPass,\ stream, numBlocks); if (multiplier == 1) { COMPUTE(1) } else if (multiplier == 2) { COMPUTE(2) } else if (multiplier == 4) { COMPUTE(4) } else if (multiplier == 8) { COMPUTE(8) } else if (multiplier == 16) { COMPUTE(16) } else if (multiplier == 32) { COMPUTE(32) } else if (multiplier == 64) { COMPUTE(64) } else { exit(1); } #undef COMPUTE } } }
a994a671d4eb82b0e8186d3e57cac7d1d72f32dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************//** * \file generateA.cu * \author Anush Krishnan (anush@bu.edu) * \brief Implementation of the kernels required to generate the matrix * resulting from the implicit terms in the momentum equation. */ #include "generateA.h" /** * namespace kernels * \brief Contains all the custom-written CUDA kernels. */ namespace kernels { /** * \brief Generates a block of the matrix resulting from implicit terms in the momentum equation. * * It assembles the matrix \c A as a combination * of the Laplacian matrix \c L and the mass matrix \c M. * A = M-alpha*L * The parameter alpha is the coefficient of the implicit part of the diffusion term. * It is 1 for a backward Euler scheme, 0.5 for a Crank-Nicolson scheme, and 0 for a fully explicit scheme. * * \param ARows rows of the COO matrix \c A * \param ACols columns of the COO matrix \c A * \param AVals values of the COO matrix \c A * \param MVals values of the COO matrix \c M * \param LRows rows of the COO matrix \c L * \param LCols columns of the COO matrix \c L * \param LVals values of the COO matrix \c A * \param ASize number of entries of the COO matrix \c A * \param alpha implicit coefficient of the diffusive scheme */ __global__ void generateA(int *ARows, int *ACols, real *AVals, real *MVals, int *LRows, int *LCols, real *LVals, int ASize, real alpha) { for (int I=threadIdx.x + blockIdx.x*blockDim.x; I<ASize; I += blockDim.x*gridDim.x) { ARows[I] = LRows[I]; ACols[I] = LCols[I]; AVals[I] = -alpha*LVals[I] + (LRows[I]==LCols[I])*MVals[LRows[I]]; } } /** * \brief Generates a block of the matrix resulting from implicit terms in the momentum equation * for the direct forcing method. * * It assembles the matrix \c A as a combination * of the Laplacian matrix \c L and the mass matrix \c M. 
* The parameter alpha is the coefficient of the implicit part of the diffusion term. * It is 1 for a backward Euler scheme, 0.5 for a Crank-Nicolson scheme, and 0 for a fully explicit scheme. * The left-hand side matrix A is set up as M-alpha*L, where M is the mass matrix, and L the Laplacian matrix. * But in the case of the direct forcing method, some rows are determined by interpolation relations, * and the rows of L are modified appropriately. For these rows alone, the rows of A are given by M-L. * * \param ARows rows of the COO matrix \c A * \param ACols columns of the COO matrix \c A * \param AVals values of the COO matrix \c A * \param MVals values of the COO matrix \c M * \param LRows rows of the COO matrix \c L * \param LCols columns of the COO matrix \c L * \param LVals values of the COO matrix \c A * \param ASize number of entries of the COO matrix \c A * \param alpha implicit coefficient of the diffusive scheme * \param tagsX tag to check if the node is next to an immersed boundary * \param tagsY tag to check if the node is next to an immersed boundary */ __global__ void generateADirectForcing(int *ARows, int *ACols, real *AVals, real *MVals, int *LRows, int *LCols, real *LVals, int ASize, real alpha, int *tags) { for(int I=threadIdx.x + blockIdx.x*blockDim.x; I<ASize; I += blockDim.x*gridDim.x) { ARows[I] = LRows[I]; ACols[I] = LCols[I]; AVals[I] = (tags[LRows[I]] == -1)*(-alpha*LVals[I]) // if the current location is untagged, add -alpha*L + (tags[LRows[I]] != -1)*(-LVals[I]) // if the current location is tagged, add -L + (LRows[I]==LCols[I])*MVals[LRows[I]]; // if it is a diagonal, add M } } } // end of namespace kernels
a994a671d4eb82b0e8186d3e57cac7d1d72f32dd.cu
/***************************************************************************//** * \file generateA.cu * \author Anush Krishnan (anush@bu.edu) * \brief Implementation of the kernels required to generate the matrix * resulting from the implicit terms in the momentum equation. */ #include "generateA.h" /** * namespace kernels * \brief Contains all the custom-written CUDA kernels. */ namespace kernels { /** * \brief Generates a block of the matrix resulting from implicit terms in the momentum equation. * * It assembles the matrix \c A as a combination * of the Laplacian matrix \c L and the mass matrix \c M. * A = M-alpha*L * The parameter alpha is the coefficient of the implicit part of the diffusion term. * It is 1 for a backward Euler scheme, 0.5 for a Crank-Nicolson scheme, and 0 for a fully explicit scheme. * * \param ARows rows of the COO matrix \c A * \param ACols columns of the COO matrix \c A * \param AVals values of the COO matrix \c A * \param MVals values of the COO matrix \c M * \param LRows rows of the COO matrix \c L * \param LCols columns of the COO matrix \c L * \param LVals values of the COO matrix \c A * \param ASize number of entries of the COO matrix \c A * \param alpha implicit coefficient of the diffusive scheme */ __global__ void generateA(int *ARows, int *ACols, real *AVals, real *MVals, int *LRows, int *LCols, real *LVals, int ASize, real alpha) { for (int I=threadIdx.x + blockIdx.x*blockDim.x; I<ASize; I += blockDim.x*gridDim.x) { ARows[I] = LRows[I]; ACols[I] = LCols[I]; AVals[I] = -alpha*LVals[I] + (LRows[I]==LCols[I])*MVals[LRows[I]]; } } /** * \brief Generates a block of the matrix resulting from implicit terms in the momentum equation * for the direct forcing method. * * It assembles the matrix \c A as a combination * of the Laplacian matrix \c L and the mass matrix \c M. * The parameter alpha is the coefficient of the implicit part of the diffusion term. 
* It is 1 for a backward Euler scheme, 0.5 for a Crank-Nicolson scheme, and 0 for a fully explicit scheme. * The left-hand side matrix A is set up as M-alpha*L, where M is the mass matrix, and L the Laplacian matrix. * But in the case of the direct forcing method, some rows are determined by interpolation relations, * and the rows of L are modified appropriately. For these rows alone, the rows of A are given by M-L. * * \param ARows rows of the COO matrix \c A * \param ACols columns of the COO matrix \c A * \param AVals values of the COO matrix \c A * \param MVals values of the COO matrix \c M * \param LRows rows of the COO matrix \c L * \param LCols columns of the COO matrix \c L * \param LVals values of the COO matrix \c A * \param ASize number of entries of the COO matrix \c A * \param alpha implicit coefficient of the diffusive scheme * \param tagsX tag to check if the node is next to an immersed boundary * \param tagsY tag to check if the node is next to an immersed boundary */ __global__ void generateADirectForcing(int *ARows, int *ACols, real *AVals, real *MVals, int *LRows, int *LCols, real *LVals, int ASize, real alpha, int *tags) { for(int I=threadIdx.x + blockIdx.x*blockDim.x; I<ASize; I += blockDim.x*gridDim.x) { ARows[I] = LRows[I]; ACols[I] = LCols[I]; AVals[I] = (tags[LRows[I]] == -1)*(-alpha*LVals[I]) // if the current location is untagged, add -alpha*L + (tags[LRows[I]] != -1)*(-LVals[I]) // if the current location is tagged, add -L + (LRows[I]==LCols[I])*MVals[LRows[I]]; // if it is a diagonal, add M } } } // end of namespace kernels
1b06ac63dab6b7e421b4bd79fe9c4d4062bd7ebd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_plus_4_front; int xdim0_update_halo_kernel2_yvel_plus_4_front_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_plus_4_front; int ydim0_update_halo_kernel2_yvel_plus_4_front_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_plus_4_front; int xdim1_update_halo_kernel2_yvel_plus_4_front_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_plus_4_front; int ydim1_update_halo_kernel2_yvel_plus_4_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_yvel_plus_4_front * (y) + \ xdim0_update_halo_kernel2_yvel_plus_4_front * \ ydim0_update_halo_kernel2_yvel_plus_4_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_yvel_plus_4_front * (y) + \ xdim1_update_halo_kernel2_yvel_plus_4_front * \ ydim1_update_halo_kernel2_yvel_plus_4_front * (z)) // user function __device__ inline void update_halo_kernel2_yvel_plus_4_front(double *yvel0, double *yvel1, const int *fields) { if (fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, -4)]; if (fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, -4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_plus_4_front( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front + idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front * ydim0_update_halo_kernel2_yvel_plus_4_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front + idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front * 
ydim1_update_halo_kernel2_yvel_plus_4_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_yvel_plus_4_front(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_yvel_plus_4_front( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 90)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(90, "update_halo_kernel2_yvel_plus_4_front"); OPS_kernels[90].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_front_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_front_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_front_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_front_h) { 
hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_4_front, &xdim0, sizeof(int)); xdim0_update_halo_kernel2_yvel_plus_4_front_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_4_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_yvel_plus_4_front_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_4_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_yvel_plus_4_front_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_4_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_yvel_plus_4_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) 
d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[90].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_front), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[90].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[90].mpi_time += t2 - t1; OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
1b06ac63dab6b7e421b4bd79fe9c4d4062bd7ebd.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_yvel_plus_4_front; int xdim0_update_halo_kernel2_yvel_plus_4_front_h = -1; __constant__ int ydim0_update_halo_kernel2_yvel_plus_4_front; int ydim0_update_halo_kernel2_yvel_plus_4_front_h = -1; __constant__ int xdim1_update_halo_kernel2_yvel_plus_4_front; int xdim1_update_halo_kernel2_yvel_plus_4_front_h = -1; __constant__ int ydim1_update_halo_kernel2_yvel_plus_4_front; int ydim1_update_halo_kernel2_yvel_plus_4_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel2_yvel_plus_4_front * (y) + \ xdim0_update_halo_kernel2_yvel_plus_4_front * \ ydim0_update_halo_kernel2_yvel_plus_4_front * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel2_yvel_plus_4_front * (y) + \ xdim1_update_halo_kernel2_yvel_plus_4_front * \ ydim1_update_halo_kernel2_yvel_plus_4_front * (z)) // user function __device__ inline void update_halo_kernel2_yvel_plus_4_front(double *yvel0, double *yvel1, const int *fields) { if (fields[FIELD_YVEL0] == 1) yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, -4)]; if (fields[FIELD_YVEL1] == 1) yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, -4)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_yvel_plus_4_front( double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front + idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front * ydim0_update_halo_kernel2_yvel_plus_4_front; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front + idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front * ydim1_update_halo_kernel2_yvel_plus_4_front; if (idx_x < size0 && idx_y < size1 && idx_z < 
size2) { update_halo_kernel2_yvel_plus_4_front(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_yvel_plus_4_front( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 90)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(90, "update_halo_kernel2_yvel_plus_4_front"); OPS_kernels[90].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_front_h || ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_front_h || xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_front_h || ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_front_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_4_front, &xdim0, sizeof(int)); 
xdim0_update_halo_kernel2_yvel_plus_4_front_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_4_front, &ydim0, sizeof(int)); ydim0_update_halo_kernel2_yvel_plus_4_front_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_4_front, &xdim1, sizeof(int)); xdim1_update_halo_kernel2_yvel_plus_4_front_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_4_front, &ydim1, sizeof(int)); ydim1_update_halo_kernel2_yvel_plus_4_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * 
args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[90].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_yvel_plus_4_front<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[90].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[90].mpi_time += t2 - t1; OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[90].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
7342984c971ac14a0d830227459bd7822b78ae72.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2020 by Contributors * \file array/cuda/segment_reduce.cu * \brief Segment reduce C APIs and definitions. */ #include <dgl/array.h> #include "./segment_reduce.cuh" #include "functor.cuh" namespace dgl { using namespace cuda; namespace aten { template <int XPU, typename IdType, typename DType> void SegmentReduce(const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg) { if (op == "sum") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Sum<IdType, DType>>( feat, offsets, out, arg); } else if (op == "max") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Max<IdType, DType>>( feat, offsets, out, arg); } else if (op == "min") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Min<IdType, DType>>( feat, offsets, out, arg); } else { LOG(FATAL) << "Not implemented"; } } template <int XPU, typename IdType, typename DType> void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) { cuda::BackwardSegmentCmp<IdType, DType>(feat, arg, out); } template void SegmentReduce<kDLGPU, int32_t, float>( const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, float>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int32_t, double>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, double>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void BackwardSegmentCmp<kDLGPU, int32_t, float>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, float>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int32_t, double>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, double>( NDArray feat, NDArray arg, NDArray out); } // 
namespace aten } // namespace dgl
7342984c971ac14a0d830227459bd7822b78ae72.cu
/*! * Copyright (c) 2020 by Contributors * \file array/cuda/segment_reduce.cu * \brief Segment reduce C APIs and definitions. */ #include <dgl/array.h> #include "./segment_reduce.cuh" #include "./functor.cuh" namespace dgl { using namespace cuda; namespace aten { template <int XPU, typename IdType, typename DType> void SegmentReduce(const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg) { if (op == "sum") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Sum<IdType, DType>>( feat, offsets, out, arg); } else if (op == "max") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Max<IdType, DType>>( feat, offsets, out, arg); } else if (op == "min") { cuda::SegmentReduce<IdType, DType, cuda::reduce::Min<IdType, DType>>( feat, offsets, out, arg); } else { LOG(FATAL) << "Not implemented"; } } template <int XPU, typename IdType, typename DType> void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) { cuda::BackwardSegmentCmp<IdType, DType>(feat, arg, out); } template void SegmentReduce<kDLGPU, int32_t, float>( const std::string& op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, float>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int32_t, double>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void SegmentReduce<kDLGPU, int64_t, double>( const std::string &op, NDArray feat, NDArray offsets, NDArray out, NDArray arg); template void BackwardSegmentCmp<kDLGPU, int32_t, float>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, float>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int32_t, double>( NDArray feat, NDArray arg, NDArray out); template void BackwardSegmentCmp<kDLGPU, int64_t, double>( NDArray feat, NDArray arg, NDArray out); } // namespace aten } // namespace dgl
e55e06d3b584f3333d3289c2935d4687ce41786a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void vcopyshift(const int n, const int shift, const double *a, double *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<n) { b[i+shift] = a[i]; } }
e55e06d3b584f3333d3289c2935d4687ce41786a.cu
#include "includes.h" extern "C" { } __global__ void vcopyshift(const int n, const int shift, const double *a, double *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<n) { b[i+shift] = a[i]; } }
c406b4999635a24d97107f61845495788030e4e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S1_5.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } 
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6880446253658,0.00125748326662192,0.782630860941001,0.782399247016589,0.000171805994132443,0.486336701083246,0.00291533408331613,0.999998387847902,1.89530036405156e-08,1.86110630763852e-05,0.999771863293389,1.00763534787491,0.999999376554314,3.23201451342761e-05,0.778783481229109,9.83430442797094,139.599986144533}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) 
{ if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real 
pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.0289144242720,0.000312520083267775,0.000172516588156547,0.000423156343342324,0.292594058953883,0.187210180830377,0.140760932670556,3.33202996756470,0.0168705003140195,1.86294307706617,1087.32533265941,0.000454183662956629,0.502574880649717,0.0199174549938367,0.00451892682878852,1.20599840550585e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; 
real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update 
concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = 
Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
c406b4999635a24d97107f61845495788030e4e6.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S1_5.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; 
if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.6880446253658,0.00125748326662192,0.782630860941001,0.782399247016589,0.000171805994132443,0.486336701083246,0.00291533408331613,0.999998387847902,1.89530036405156e-08,1.86110630763852e-05,0.999771863293389,1.00763534787491,0.999999376554314,3.23201451342761e-05,0.778783481229109,9.83430442797094,139.599986144533}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, 
rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif 
//Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.0289144242720,0.000312520083267775,0.000172516588156547,0.000423156343342324,0.292594058953883,0.187210180830377,0.140760932670556,3.33202996756470,0.0168705003140195,1.86294307706617,1087.32533265941,0.000454183662956629,0.502574880649717,0.0199174549938367,0.00451892682878852,1.20599840550585e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; 
real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// 
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) 
{ AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = 
Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
d0a2363a9c9865a0d005534f8834575349d9d56a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if VECTOR_SIZE == 1 typedef float VEC; #endif #if VECTOR_SIZE == 2 typedef float2 VEC; #endif #if VECTOR_SIZE == 4 typedef float4 VEC; #endif /*#if VECTOR_SIZE == 8 typedef float8 VEC; #endif #if VECTOR_SIZE == 16 typedef float16 VEC; #endif*/ inline __host__ __device__ void operator+=(float2 &a, float2 b) { a.x += b.x; a.y += b.y; } inline __host__ __device__ void operator+=(float4 &a, float4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } extern "C" __global__ void reduce( const VEC* in, float* out, unsigned int n, unsigned int inOffset, unsigned int outOffset) { unsigned int tid = threadIdx.x; unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; __shared__ float buf[WORK_GROUP_SIZE_X]; #if UNBOUNDED_WG == 1 unsigned int addr = i+inOffset; if (i < (n+VECTOR_SIZE-1)/VECTOR_SIZE) { #if VECTOR_SIZE == 1 buf[tid] = in[addr]; #endif #if VECTOR_SIZE == 2 buf[tid] = in[addr].x+in[addr].y; #endif #if VECTOR_SIZE == 4 buf[tid] = in[addr].x+in[addr].y+in[addr].z+in[addr].w; #endif /*#if VECTOR_SIZE == 8 buf[tid] = in[addr].s0+in[addr].s1+in[addr].s2+in[addr].s3+in[addr].s4+in[addr].s5+in[addr].s6+in[addr].s7; #endif #if VECTOR_SIZE == 16 buf[tid] = in[addr].s0+in[addr].s1+in[addr].s2+in[addr].s3+in[addr].s4+in[addr].s5+in[addr].s6+in[addr].s7+in[addr].s8+in[addr].s9+in[addr].sa+in[addr].sb+in[addr].sc+in[addr].sd+in[addr].se+in[addr].sf; #endif*/ } else { buf[tid] = 0.0f; } #else /*UNBOUNDED_WG != 1*/ VEC partial; #if VECTOR_SIZE == 1 partial = 0.0f; #endif #if VECTOR_SIZE == 2 partial.x = 0.0f; partial.y = 0.0f; #endif #if VECTOR_SIZE == 4 partial.x = 0.0f; partial.y = 0.0f; partial.z = 0.0f; partial.w = 0.0f; #endif /*#if VECTOR_SIZE == 8 partial = (0,0,0,0,0,0,0,0); #endif #if VECTOR_SIZE == 16 partial = (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); #endif*/ while (i < n/VECTOR_SIZE) { partial += in[i+inOffset]; i += WORK_GROUP_SIZE_X*WG_NUM; } #if VECTOR_SIZE == 1 buf[tid] = partial; 
#endif #if VECTOR_SIZE == 2 buf[tid] = partial.x + partial.y; #endif #if VECTOR_SIZE == 4 buf[tid] = partial.x + partial.y + partial.z + partial.w; #endif /*#if VECTOR_SIZE == 8 buf[tid] = partial.s0 + partial.s1 + partial.s2 + partial.s3 + partial.s4 + partial.s5 + partial.s6 + partial.s7; #endif #if VECTOR_SIZE == 16 buf[tid] = partial.s0 + partial.s1 + partial.s2 + partial.s3 + partial.s4 + partial.s5 + partial.s6 + partial.s7 + partial.s8 + partial.s9 + partial.sa + partial.sb + partial.sc + partial.sd + partial.se + partial.sf; #endif*/ #endif /* UNBOUNDED_WG != 1 */ __syncthreads(); #if WORK_GROUP_SIZE_X >= 512 if (tid < 256) buf[tid] += buf[tid + 256]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 256 if (tid < 128) buf[tid] += buf[tid + 128]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 128 if (tid < 64) buf[tid] += buf[tid + 64]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 64 if (tid < 32) buf[tid] += buf[tid + 32]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 32 if (tid < 16) buf[tid] += buf[tid + 16]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 16 if (tid < 8) buf[tid] += buf[tid + 8]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 8 if (tid < 4) buf[tid] += buf[tid + 4]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 4 if (tid < 2) buf[tid] += buf[tid + 2]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 2 if (tid < 1) { buf[0] += buf[1]; } #endif #if USE_ATOMICS == 1 if (tid < 1) atomicAdd(out+outOffset, buf[0]); #else // the last group if (blockIdx.x == gridDim.x-1) { if (tid == 0) out[blockIdx.x + outOffset] = buf[0]; else if (tid < VECTOR_SIZE) out[blockIdx.x + outOffset + tid] = 0.0f; } // other groups else { if (tid == 0) out[blockIdx.x + outOffset] = buf[0]; } #endif }
d0a2363a9c9865a0d005534f8834575349d9d56a.cu
#if VECTOR_SIZE == 1 typedef float VEC; #endif #if VECTOR_SIZE == 2 typedef float2 VEC; #endif #if VECTOR_SIZE == 4 typedef float4 VEC; #endif /*#if VECTOR_SIZE == 8 typedef float8 VEC; #endif #if VECTOR_SIZE == 16 typedef float16 VEC; #endif*/ inline __host__ __device__ void operator+=(float2 &a, float2 b) { a.x += b.x; a.y += b.y; } inline __host__ __device__ void operator+=(float4 &a, float4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; } extern "C" __global__ void reduce( const VEC* in, float* out, unsigned int n, unsigned int inOffset, unsigned int outOffset) { unsigned int tid = threadIdx.x; unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; __shared__ float buf[WORK_GROUP_SIZE_X]; #if UNBOUNDED_WG == 1 unsigned int addr = i+inOffset; if (i < (n+VECTOR_SIZE-1)/VECTOR_SIZE) { #if VECTOR_SIZE == 1 buf[tid] = in[addr]; #endif #if VECTOR_SIZE == 2 buf[tid] = in[addr].x+in[addr].y; #endif #if VECTOR_SIZE == 4 buf[tid] = in[addr].x+in[addr].y+in[addr].z+in[addr].w; #endif /*#if VECTOR_SIZE == 8 buf[tid] = in[addr].s0+in[addr].s1+in[addr].s2+in[addr].s3+in[addr].s4+in[addr].s5+in[addr].s6+in[addr].s7; #endif #if VECTOR_SIZE == 16 buf[tid] = in[addr].s0+in[addr].s1+in[addr].s2+in[addr].s3+in[addr].s4+in[addr].s5+in[addr].s6+in[addr].s7+in[addr].s8+in[addr].s9+in[addr].sa+in[addr].sb+in[addr].sc+in[addr].sd+in[addr].se+in[addr].sf; #endif*/ } else { buf[tid] = 0.0f; } #else /*UNBOUNDED_WG != 1*/ VEC partial; #if VECTOR_SIZE == 1 partial = 0.0f; #endif #if VECTOR_SIZE == 2 partial.x = 0.0f; partial.y = 0.0f; #endif #if VECTOR_SIZE == 4 partial.x = 0.0f; partial.y = 0.0f; partial.z = 0.0f; partial.w = 0.0f; #endif /*#if VECTOR_SIZE == 8 partial = (0,0,0,0,0,0,0,0); #endif #if VECTOR_SIZE == 16 partial = (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); #endif*/ while (i < n/VECTOR_SIZE) { partial += in[i+inOffset]; i += WORK_GROUP_SIZE_X*WG_NUM; } #if VECTOR_SIZE == 1 buf[tid] = partial; #endif #if VECTOR_SIZE == 2 buf[tid] = partial.x + partial.y; #endif #if VECTOR_SIZE == 
4 buf[tid] = partial.x + partial.y + partial.z + partial.w; #endif /*#if VECTOR_SIZE == 8 buf[tid] = partial.s0 + partial.s1 + partial.s2 + partial.s3 + partial.s4 + partial.s5 + partial.s6 + partial.s7; #endif #if VECTOR_SIZE == 16 buf[tid] = partial.s0 + partial.s1 + partial.s2 + partial.s3 + partial.s4 + partial.s5 + partial.s6 + partial.s7 + partial.s8 + partial.s9 + partial.sa + partial.sb + partial.sc + partial.sd + partial.se + partial.sf; #endif*/ #endif /* UNBOUNDED_WG != 1 */ __syncthreads(); #if WORK_GROUP_SIZE_X >= 512 if (tid < 256) buf[tid] += buf[tid + 256]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 256 if (tid < 128) buf[tid] += buf[tid + 128]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 128 if (tid < 64) buf[tid] += buf[tid + 64]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 64 if (tid < 32) buf[tid] += buf[tid + 32]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 32 if (tid < 16) buf[tid] += buf[tid + 16]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 16 if (tid < 8) buf[tid] += buf[tid + 8]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 8 if (tid < 4) buf[tid] += buf[tid + 4]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 4 if (tid < 2) buf[tid] += buf[tid + 2]; __syncthreads(); #endif #if WORK_GROUP_SIZE_X >= 2 if (tid < 1) { buf[0] += buf[1]; } #endif #if USE_ATOMICS == 1 if (tid < 1) atomicAdd(out+outOffset, buf[0]); #else // the last group if (blockIdx.x == gridDim.x-1) { if (tid == 0) out[blockIdx.x + outOffset] = buf[0]; else if (tid < VECTOR_SIZE) out[blockIdx.x + outOffset + tid] = 0.0f; } // other groups else { if (tid == 0) out[blockIdx.x + outOffset] = buf[0]; } #endif }
0a8e694e96c7cce4c10603a900de268ccdd3fd9f.hip
// !!! This is a file automatically generated by hipify!!! /* * Discrete Sine Transform in row wise (DST one) * DST_I_Row * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DST_I_Row(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DST_I_Row.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "ERRORCHK.h" // #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const float PI_d = 3.141592653589793238462643383279502884f; //pi template <unsigned int TILE_DIM > __global__ void DSTI_Row_Kernelx(float *A, float *C, int numARows, int numAColumns, int numCRows, int numCColumns) { float CValue = 0.0f; const float PI_d = 3.141592653589793238462643383279502884f; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ float As[TILE_DIM][TILE_DIM]; __shared__ float Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; } else { As[threadIdx.y][threadIdx.x] = 0.0; } //Bs[threadIdx.y][threadIdx.x] = __cosf(((threadIdx.y + k*TILE_DIM)*PI_d*Col / (numAColumns - 1)))*sqrtf(1.0 / (1 + DELTA(Col + 1, 1) + DELTA(Col + 1, numAColumns)))*sqrtf(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1) + DELTA(numAColumns, (threadIdx.y + k*TILE_DIM) + 1)))*sqrtf(2.0 / numAColumns); if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = __sinf(((((threadIdx.y + k*TILE_DIM)+1)*PI_d*(Col+1)) 
/ (numAColumns + 1)))*sqrtf(2.0 / (numAColumns+1)); } //Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col]; else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDSTRowOneS(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns) { float * hostA = A; // The A matrix //float * hostB = B; // The B matrix float * hostC = C; // The output C matrix //float * hostComputedC; float * deviceA; //float * deviceB; float * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); hipError_t error; int devID = 0; // get number of SMs on this GPU error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns)); //hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns); gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns)); //thrust::device_ptr< float >dev_ptr_A(deviceA); //thrust::device_ptr< float >dev_ptr_C(deviceC); // Copy memory to the GPU gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice)); //hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////// unsigned int TILE_DIM=16; dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DSTI_Row_Kernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(hipFree(deviceA)); //hipFree(deviceB); gpuErrchk(hipFree(deviceC)); return; case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DSTI_Row_Kernelx <32> 
<< <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(hipFree(deviceA)); //hipFree(deviceB); gpuErrchk(hipFree(deviceC)); return; } }
0a8e694e96c7cce4c10603a900de268ccdd3fd9f.cu
/* * Discrete Sine Transform in row wise (DST one) * DST_I_Row * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DST_I_Row(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DST_I_Row.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cuda.h> #include <cuda_runtime.h> #include "ERRORCHK.h" // #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const float PI_d = 3.141592653589793238462643383279502884f; //pi template <unsigned int TILE_DIM > __global__ void DSTI_Row_Kernelx(float *A, float *C, int numARows, int numAColumns, int numCRows, int numCColumns) { float CValue = 0.0f; const float PI_d = 3.141592653589793238462643383279502884f; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ float As[TILE_DIM][TILE_DIM]; __shared__ float Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) { As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x]; } else { As[threadIdx.y][threadIdx.x] = 0.0; } //Bs[threadIdx.y][threadIdx.x] = __cosf(((threadIdx.y + k*TILE_DIM)*PI_d*Col / (numAColumns - 1)))*sqrtf(1.0 / (1 + DELTA(Col + 1, 1) + DELTA(Col + 1, numAColumns)))*sqrtf(1.0 / (1 + DELTA(1, (threadIdx.y + k*TILE_DIM) + 1) + DELTA(numAColumns, (threadIdx.y + k*TILE_DIM) + 1)))*sqrtf(2.0 / numAColumns); if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) { Bs[threadIdx.y][threadIdx.x] = __sinf(((((threadIdx.y + k*TILE_DIM)+1)*PI_d*(Col+1)) / (numAColumns + 1)))*sqrtf(2.0 / (numAColumns+1)); } 
//Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col]; else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDSTRowOneS(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns) { float * hostA = A; // The A matrix //float * hostB = B; // The B matrix float * hostC = C; // The output C matrix //float * hostComputedC; float * deviceA; //float * deviceB; float * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); cudaError_t error; int devID = 0; // get number of SMs on this GPU error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns)); //cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns); gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns)); //thrust::device_ptr< float >dev_ptr_A(deviceA); //thrust::device_ptr< float >dev_ptr_C(deviceC); // Copy memory to the GPU gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice)); //cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////// unsigned int TILE_DIM=16; dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DSTI_Row_Kernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(cudaFree(deviceA)); //cudaFree(deviceB); gpuErrchk(cudaFree(deviceC)); return; case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; 
DSTI_Row_Kernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(cudaFree(deviceA)); //cudaFree(deviceB); gpuErrchk(cudaFree(deviceC)); return; } }
3eaec5d7f6a9f06b0878c2152110f7eeb635b217.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdint> #include <iostream> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "cuda_helper.h" // matrix addition template <int M, int N> __global__ void mat_add(float* A, float* B, float* C) { // note, the layout of threads is column major int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; C[row * N + col] = A[row * N + col] + B[row * N + col]; } int main() { const int M = 24; const int N = 16; float h_A[M][N]; float h_B[M][N]; float h_C[M][N]; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_A[i][j] = -i * N - j; if (i == j) { h_B[i][j] = i * N + j + 1; } else { h_B[i][j] = i * N + j; } } } float *d_A, *d_B, *d_C; // malloc for arrays on device cc(hipMalloc(&d_A, sizeof(float) * M * N)); cc(hipMalloc(&d_B, sizeof(float) * M * N)); cc(hipMalloc(&d_C, sizeof(float) * M * N)); // copy memory from host to device cc(hipMemcpy(d_A, h_A, sizeof(float) * M * N, hipMemcpyHostToDevice)); cc(hipMemcpy(d_B, h_B, sizeof(float) * M * N, hipMemcpyHostToDevice)); dim3 num_of_block = { 4, 3 }; dim3 thread_per_block = { 4, 8 }; // kernel invocation with N threads mat_add<M, N> << <num_of_block, thread_per_block >> > (d_A, d_B, d_C); // waiting until device completed cc(hipDeviceSynchronize()); // copy result from device to host cc(hipMemcpy(h_C, d_C, sizeof(float) * M * N, hipMemcpyDeviceToHost)); cc(hipFree(d_A)); cc(hipFree(d_B)); cc(hipFree(d_C)); std::cout << "A:\n"; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { std::cout << h_A[i][j] << " "; } std::cout << std::endl; } std::cout << std::endl; std::cout << "B:\n"; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { std::cout << h_B[i][j] << " "; } std::cout << std::endl; } std::cout << std::endl; std::cout << "C:\n"; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { std::cout << h_C[i][j] << " "; } std::cout << std::endl; } std::cout << 
std::endl; // Output of matrix C: // 1 if on main diagonal // 0 otherwise return 0; }
3eaec5d7f6a9f06b0878c2152110f7eeb635b217.cu
#include <cstdint> #include <iostream> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "cuda_helper.h" // matrix addition template <int M, int N> __global__ void mat_add(float* A, float* B, float* C) { // note, the layout of threads is column major int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; C[row * N + col] = A[row * N + col] + B[row * N + col]; } int main() { const int M = 24; const int N = 16; float h_A[M][N]; float h_B[M][N]; float h_C[M][N]; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { h_A[i][j] = -i * N - j; if (i == j) { h_B[i][j] = i * N + j + 1; } else { h_B[i][j] = i * N + j; } } } float *d_A, *d_B, *d_C; // malloc for arrays on device cc(cudaMalloc(&d_A, sizeof(float) * M * N)); cc(cudaMalloc(&d_B, sizeof(float) * M * N)); cc(cudaMalloc(&d_C, sizeof(float) * M * N)); // copy memory from host to device cc(cudaMemcpy(d_A, h_A, sizeof(float) * M * N, cudaMemcpyHostToDevice)); cc(cudaMemcpy(d_B, h_B, sizeof(float) * M * N, cudaMemcpyHostToDevice)); dim3 num_of_block = { 4, 3 }; dim3 thread_per_block = { 4, 8 }; // kernel invocation with N threads mat_add<M, N> << <num_of_block, thread_per_block >> > (d_A, d_B, d_C); // waiting until device completed cc(cudaDeviceSynchronize()); // copy result from device to host cc(cudaMemcpy(h_C, d_C, sizeof(float) * M * N, cudaMemcpyDeviceToHost)); cc(cudaFree(d_A)); cc(cudaFree(d_B)); cc(cudaFree(d_C)); std::cout << "A:\n"; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { std::cout << h_A[i][j] << " "; } std::cout << std::endl; } std::cout << std::endl; std::cout << "B:\n"; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { std::cout << h_B[i][j] << " "; } std::cout << std::endl; } std::cout << std::endl; std::cout << "C:\n"; for (int i = 0; i < M; i++) { for (int j = 0; j < N; j++) { std::cout << h_C[i][j] << " "; } std::cout << std::endl; } std::cout << std::endl; // Output of matrix C: // 1 if on main 
diagonal // 0 otherwise return 0; }
870c0973d0ec10633be805810f61f8512c94e987.hip
// !!! This is a file automatically generated by hipify!!! #include <kernels/gpu/dimshuffle.h> #include <core/tensor_builder.h> #include <set> #include <global/operator_factory.h> #include <global/fp16_operator_factory.h> #include <backend/name.h> #include <core/device.h> #include <utils/assert.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "kernels/gpu/gpu_kernel.h" namespace ts { namespace gpu { template <typename T> static __global__ void gpu_dimshuffle_kernel(int count, const T* in, GpuHypeShape in_shape, T *out, GpuHypeShape out_shape, int dim, int *shuffle) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= count) return; int out_index = index; int in_index = 0; auto out_weight_it = out_shape.weights + 1; auto in_weight_it = in_shape.weights + 1; /* ============================================ */ int running_dim = 0; /* -------------------------------------------- */ for (int times = out_shape.dims - 1; times; --times) { auto coord = index / *out_weight_it; /* ============================================ */ if (running_dim == dim) coord = shuffle[coord]; ++running_dim; /* -------------------------------------------- */ in_index += coord * *in_weight_it; index %= *out_weight_it; ++out_weight_it; ++in_weight_it; } auto coord = index; /* ============================================ */ if (running_dim == dim) coord = shuffle[coord]; /* -------------------------------------------- */ in_index += coord; /* ++++++++++++++++++++++++++++++++++++++++++++ */ out[out_index] = in[in_index]; } template <typename T> static void gpu_dimshuffle_comput_run(const Tensor &x, int dim, const std::vector<int> &shuffle, Tensor &out) { int *gpu_shuffle = nullptr; auto gpu_memory = MakeGPUHypeShape(out.device(), {x.sizes(), out.sizes()}, {{(void *) (shuffle.data()), int(sizeof(int) * shuffle.size())}}, {(void **) (&gpu_shuffle)}); auto &gpu_in_shape = gpu_memory.second[0]; auto &gpu_out_shape = gpu_memory.second[1]; auto in_data = x.data<T>(); auto 
out_data = out.data<T>(); auto count = out.count(); RUN_KERNEL(gpu_dimshuffle_kernel<T>, CUDA_BLOCK(count, CUDA_THREAD_NUM), CUDA_THREAD_NUM, count, in_data, gpu_in_shape, out_data, gpu_out_shape, dim, gpu_shuffle); } void Dimshuffle::dimshuffle(const Tensor &x, int dim, const std::vector<int> &shuffle, Tensor &out) { DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { gpu_dimshuffle_comput_run<TYPE>(x, dim, shuffle, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); DECLARE_COMPUTE_RUN(FLOAT16, half); DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } /////////////////////////////////////////// using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(Dimshuffle, GPU, name::layer::dimshuffle()) TS_REGISTER_FP16_OPERATOR(Dimshuffle, ts::GPU, name::layer::dimshuffle())
870c0973d0ec10633be805810f61f8512c94e987.cu
#include <kernels/gpu/dimshuffle.h> #include <core/tensor_builder.h> #include <set> #include <global/operator_factory.h> #include <global/fp16_operator_factory.h> #include <backend/name.h> #include <core/device.h> #include <utils/assert.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include "kernels/gpu/gpu_kernel.h" namespace ts { namespace gpu { template <typename T> static __global__ void gpu_dimshuffle_kernel(int count, const T* in, GpuHypeShape in_shape, T *out, GpuHypeShape out_shape, int dim, int *shuffle) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= count) return; int out_index = index; int in_index = 0; auto out_weight_it = out_shape.weights + 1; auto in_weight_it = in_shape.weights + 1; /* ============================================ */ int running_dim = 0; /* -------------------------------------------- */ for (int times = out_shape.dims - 1; times; --times) { auto coord = index / *out_weight_it; /* ============================================ */ if (running_dim == dim) coord = shuffle[coord]; ++running_dim; /* -------------------------------------------- */ in_index += coord * *in_weight_it; index %= *out_weight_it; ++out_weight_it; ++in_weight_it; } auto coord = index; /* ============================================ */ if (running_dim == dim) coord = shuffle[coord]; /* -------------------------------------------- */ in_index += coord; /* ++++++++++++++++++++++++++++++++++++++++++++ */ out[out_index] = in[in_index]; } template <typename T> static void gpu_dimshuffle_comput_run(const Tensor &x, int dim, const std::vector<int> &shuffle, Tensor &out) { int *gpu_shuffle = nullptr; auto gpu_memory = MakeGPUHypeShape(out.device(), {x.sizes(), out.sizes()}, {{(void *) (shuffle.data()), int(sizeof(int) * shuffle.size())}}, {(void **) (&gpu_shuffle)}); auto &gpu_in_shape = gpu_memory.second[0]; auto &gpu_out_shape = gpu_memory.second[1]; auto in_data = x.data<T>(); auto out_data = out.data<T>(); auto count = out.count(); 
RUN_KERNEL(gpu_dimshuffle_kernel<T>, CUDA_BLOCK(count, CUDA_THREAD_NUM), CUDA_THREAD_NUM, count, in_data, gpu_in_shape, out_data, gpu_out_shape, dim, gpu_shuffle); } void Dimshuffle::dimshuffle(const Tensor &x, int dim, const std::vector<int> &shuffle, Tensor &out) { DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { gpu_dimshuffle_comput_run<TYPE>(x, dim, shuffle, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); DECLARE_COMPUTE_RUN(FLOAT16, half); DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } /////////////////////////////////////////// using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(Dimshuffle, GPU, name::layer::dimshuffle()) TS_REGISTER_FP16_OPERATOR(Dimshuffle, ts::GPU, name::layer::dimshuffle())
15d130185a6df7c2138ff021d83c66763f126231.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <algorithm> #include <cmath> __global__ void mish(float* tx, float* aten_mul) { float tx_1 = __ldg(tx + (long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)); aten_mul[(long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)] = tx_1 * (tanhf(tx_1>20.f ? tx_1 : (log1pf(expf(tx_1))) / 1.f)); } #include <algorithm> #include <cmath> #include <stdio.h> template<typename T, typename U> constexpr T ceildiv(T t, U u) { return (t + u - 1) / u; } int main() { constexpr int N = 1 << 20; float *x, *y, *d_x, *d_y; x = (float*)malloc(N * sizeof(float)); y = (float*)malloc(N * sizeof(float)); hipMalloc(&d_x, N * sizeof(float)); hipMalloc(&d_y, N * sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 3.0f; y[i] = 2.0f; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N * sizeof(float), hipMemcpyHostToDevice); constexpr int blockSize = 512; constexpr int nBlocks = ceildiv(N, blockSize); float millis = 0.0f; float temp = 0.0f; for (int i = 0; i < 500; i++) { hipEventRecord(start); hipLaunchKernelGGL(( mish), dim3(nBlocks), dim3(blockSize), 0, 0, d_x, d_y); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&temp, start, stop); millis += temp; } millis = millis / 500; hipMemcpy(y, d_y, N * sizeof(float), hipMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) { float mv = 3.0f * tanhf(std::log1p(::exp(3.0))); maxError = ::max(maxError, std::abs(mv - y[i])); } printf("max error: %f\n", maxError); printf("duration (ms): %f\n", millis); printf("effective bandwidth (gb/s): %f\n", (float)N * sizeof(float) * 3 / millis / 1e6); hipFree(d_x); hipFree(d_y); free(x); free(y); return 0; }
15d130185a6df7c2138ff021d83c66763f126231.cu
#include <stdio.h> #include <algorithm> #include <cmath> __global__ void mish(float* tx, float* aten_mul) { float tx_1 = __ldg(tx + (long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)); aten_mul[(long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)] = tx_1 * (tanhf(tx_1>20.f ? tx_1 : (log1pf(expf(tx_1))) / 1.f)); } #include <algorithm> #include <cmath> #include <stdio.h> template<typename T, typename U> constexpr T ceildiv(T t, U u) { return (t + u - 1) / u; } int main() { constexpr int N = 1 << 20; float *x, *y, *d_x, *d_y; x = (float*)malloc(N * sizeof(float)); y = (float*)malloc(N * sizeof(float)); cudaMalloc(&d_x, N * sizeof(float)); cudaMalloc(&d_y, N * sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 3.0f; y[i] = 2.0f; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice); constexpr int blockSize = 512; constexpr int nBlocks = ceildiv(N, blockSize); float millis = 0.0f; float temp = 0.0f; for (int i = 0; i < 500; i++) { cudaEventRecord(start); mish<<<nBlocks, blockSize>>>(d_x, d_y); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&temp, start, stop); millis += temp; } millis = millis / 500; cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) { float mv = 3.0f * tanhf(std::log1p(std::exp(3.0))); maxError = std::max(maxError, std::abs(mv - y[i])); } printf("max error: %f\n", maxError); printf("duration (ms): %f\n", millis); printf("effective bandwidth (gb/s): %f\n", (float)N * sizeof(float) * 3 / millis / 1e6); cudaFree(d_x); cudaFree(d_y); free(x); free(y); return 0; }
92168c8bd4fc34e003749f0094b5a2098be5c272.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <hip/hip_runtime.h> #include "common.h" #define NUM_THREADS 256 const double DIM = 2.0 * cutoff; int num, tot_num; double dim; extern double size; // // benchmarking program // __device__ int locationToID(double x, double y, double dim, int num) { int xID = x / dim; int yID = y / dim; return xID * num + yID; } __device__ int locationToID(particle_t &particle, double dim, int num) { int xID = particle.x / dim; int yID = particle.y / dim; return xID * num + yID; } __global__ void assign_particles(int n, particle_t * particles, int* d_next, int* d_grids, double dim, int num) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; int k = locationToID(particles[tid], dim, num); d_next[tid] = atomicExch(&d_grids[k], tid); } __global__ void clear_grids(int tot_num, int* d_grids) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= tot_num) return; d_grids[tid] = -1; } __device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor) { double dx = neighbor.x - particle.x; double dy = neighbor.y - particle.y; double r2 = dx * dx + dy * dy; if( r2 > cutoff*cutoff ) return; //r2 = fmax( r2, min_r*min_r ); r2 = (r2 > min_r*min_r) ? 
r2 : min_r*min_r; double r = sqrt( r2 ); // // very simple short-range repulsive force // double coef = ( 1 - cutoff / r ) / r2 / mass; particle.ax += coef * dx; particle.ay += coef * dy; } __device__ void compute_self_grid_forces(int tid, particle_t * particles, int * d_next, int head) { particle_t* p = &particles[tid]; for(int i = head; i != -1; i = d_next[i]) { if(i != tid) apply_force_gpu(*p, particles[i]); } } __device__ void compute_grid_forces(int tid, particle_t * particles, int * d_next, int head) { particle_t* p = &particles[tid]; for(int i = head; i != -1; i = d_next[i]) { apply_force_gpu(*p, particles[i]); } } __global__ void compute_forces_gpu(int n, particle_t * particles, int * d_next, int * d_grids, double dim, int num) { // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; particle_t * p = &particles[tid]; int xID = p->x / dim; int yID = p->y / dim; int k = xID * num + yID; p->ax = p->ay = 0; // check self compute_self_grid_forces(tid, particles, d_next, d_grids[k]); // check other if(xID > 0) { compute_grid_forces(tid, particles, d_next, d_grids[k - num]); if(yID > 0) compute_grid_forces(tid, particles, d_next, d_grids[k - num - 1]); if(yID < num - 1) compute_grid_forces(tid, particles, d_next, d_grids[k - num + 1]); } if(xID < num - 1) { compute_grid_forces(tid, particles, d_next, d_grids[k + num]); if(yID > 0) compute_grid_forces(tid, particles, d_next, d_grids[k + num - 1]); if(yID < num - 1) compute_grid_forces(tid, particles, d_next, d_grids[k + num + 1]); } if(yID > 0) compute_grid_forces(tid, particles, d_next, d_grids[k - 1]); if(yID < num - 1) compute_grid_forces(tid, particles, d_next, d_grids[k + 1]); } __global__ void compute_grid_forces_gpu(particle_t * particles, int * d_next,int tot_num, int * d_grids, double dim, int num) { // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= tot_num) return; int xID = tid / num; int yID = tid % num; int k = tid; for(int 
i = d_grids[tid]; i != -1; i = d_next[i]) { particle_t * p = &particles[i]; p->ax = p->ay = 0; // check self compute_self_grid_forces(i, particles, d_next, d_grids[k]); // check other if(xID > 0) { compute_grid_forces(i, particles, d_next, d_grids[k - num]); if(yID > 0) compute_grid_forces(i, particles, d_next, d_grids[k - num - 1]); if(yID < num - 1) compute_grid_forces(i, particles, d_next, d_grids[k - num + 1]); } if(xID < num - 1) { compute_grid_forces(i, particles, d_next, d_grids[k + num]); if(yID > 0) compute_grid_forces(i, particles, d_next, d_grids[k + num - 1]); if(yID < num - 1) compute_grid_forces(i, particles, d_next, d_grids[k + num + 1]); } if(yID > 0) compute_grid_forces(i, particles, d_next, d_grids[k - 1]); if(yID < num - 1) compute_grid_forces(i, particles, d_next, d_grids[k + 1]); } } __global__ void move_gpu (particle_t * particles, int n, double size) { // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; particle_t * p = &particles[tid]; // // slightly simplified Velocity Verlet integration // conserves energy better than explicit Euler method // p->vx += p->ax * dt; p->vy += p->ay * dt; p->x += p->vx * dt; p->y += p->vy * dt; // // bounce from walls // while( p->x < 0 || p->x > size ) { p->x = p->x < 0 ? -(p->x) : 2*size-p->x; p->vx = -(p->vx); } while( p->y < 0 || p->y > size ) { p->y = p->y < 0 ? -(p->y) : 2*size-p->y; p->vy = -(p->vy); } } int main( int argc, char **argv ) { // This takes a few seconds to initialize the runtime hipDeviceSynchronize(); if( find_option( argc, argv, "-h" ) >= 0 ) { printf( "Options:\n" ); printf( "-h to see this help\n" ); printf( "-n <int> to set the number of particles\n" ); printf( "-o <filename> to specify the output file name\n" ); return 0; } int n = read_int( argc, argv, "-n", 1000 ); char *savename = read_string( argc, argv, "-o", NULL ); FILE *fsave = savename ? 
fopen( savename, "w" ) : NULL; particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) ); // GPU particle data structure particle_t * d_particles; hipMalloc((void **) &d_particles, n * sizeof(particle_t)); set_size( n ); init_particles( n, particles ); hipDeviceSynchronize(); double copy_time = read_timer( ); // Copy the particles to the GPU hipMemcpy(d_particles, particles, n * sizeof(particle_t), hipMemcpyHostToDevice); hipDeviceSynchronize(); copy_time = read_timer( ) - copy_time; // set up grids double set_grid_time = read_timer(); num = (int)ceil(size*1.0 / DIM); // we get the num of the grid for one directions tot_num = num * num; // total number of grids dim = size/num; // the acutal size of a subgrid int blks = (n + NUM_THREADS - 1) / NUM_THREADS; int g_blks = (tot_num + NUM_THREADS - 1) / NUM_THREADS; int * d_grids; hipMalloc((void **) &d_grids, tot_num * sizeof(int)); int * d_next; hipMalloc((void **) &d_next, n * sizeof(int)); hipDeviceSynchronize(); hipLaunchKernelGGL(( clear_grids) , dim3(g_blks), dim3(NUM_THREADS) , 0, 0, tot_num, d_grids); hipLaunchKernelGGL(( assign_particles) , dim3(blks), dim3(NUM_THREADS) , 0, 0, n, d_particles, d_next, d_grids, dim, num); hipDeviceSynchronize(); set_grid_time = read_timer() - set_grid_time; // // simulate a number of time steps // hipDeviceSynchronize(); double simulation_time = read_timer( ); for( int step = 0; step < NSTEPS; step++ ) { // // compute forces // hipLaunchKernelGGL(( compute_forces_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, n, d_particles, d_next, d_grids, dim, num); //compute_grid_forces_gpu <<< g_blks, NUM_THREADS >>> (d_particles, d_next, tot_num, d_grids, dim, num); // // move particles // hipLaunchKernelGGL(( move_gpu) , dim3(blks), dim3(NUM_THREADS) , 0, 0, d_particles, n, size); // Re-locate all the particles to grids hipLaunchKernelGGL(( clear_grids) , dim3(g_blks), dim3(NUM_THREADS) , 0, 0, tot_num, d_grids); hipLaunchKernelGGL(( assign_particles) , dim3(blks), 
dim3(NUM_THREADS) , 0, 0, n, d_particles, d_next, d_grids, dim, num); // // save if necessary // if( fsave && (step%SAVEFREQ) == 0 ) { // Copy the particles back to the CPU hipMemcpy(particles, d_particles, n * sizeof(particle_t), hipMemcpyDeviceToHost); save( fsave, n, particles); } } hipDeviceSynchronize(); simulation_time = read_timer( ) - simulation_time; printf( "CPU-GPU copy time = %g seconds\n", copy_time); printf( "n = %d, simulation time = %g seconds\n", n, simulation_time ); free( particles ); hipFree(d_particles); hipFree(d_grids); hipFree(d_next); if( fsave ) fclose( fsave ); return 0; }
92168c8bd4fc34e003749f0094b5a2098be5c272.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <math.h> #include <cuda.h> #include "common.h" #define NUM_THREADS 256 const double DIM = 2.0 * cutoff; int num, tot_num; double dim; extern double size; // // benchmarking program // __device__ int locationToID(double x, double y, double dim, int num) { int xID = x / dim; int yID = y / dim; return xID * num + yID; } __device__ int locationToID(particle_t &particle, double dim, int num) { int xID = particle.x / dim; int yID = particle.y / dim; return xID * num + yID; } __global__ void assign_particles(int n, particle_t * particles, int* d_next, int* d_grids, double dim, int num) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; int k = locationToID(particles[tid], dim, num); d_next[tid] = atomicExch(&d_grids[k], tid); } __global__ void clear_grids(int tot_num, int* d_grids) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= tot_num) return; d_grids[tid] = -1; } __device__ void apply_force_gpu(particle_t &particle, particle_t &neighbor) { double dx = neighbor.x - particle.x; double dy = neighbor.y - particle.y; double r2 = dx * dx + dy * dy; if( r2 > cutoff*cutoff ) return; //r2 = fmax( r2, min_r*min_r ); r2 = (r2 > min_r*min_r) ? 
r2 : min_r*min_r; double r = sqrt( r2 ); // // very simple short-range repulsive force // double coef = ( 1 - cutoff / r ) / r2 / mass; particle.ax += coef * dx; particle.ay += coef * dy; } __device__ void compute_self_grid_forces(int tid, particle_t * particles, int * d_next, int head) { particle_t* p = &particles[tid]; for(int i = head; i != -1; i = d_next[i]) { if(i != tid) apply_force_gpu(*p, particles[i]); } } __device__ void compute_grid_forces(int tid, particle_t * particles, int * d_next, int head) { particle_t* p = &particles[tid]; for(int i = head; i != -1; i = d_next[i]) { apply_force_gpu(*p, particles[i]); } } __global__ void compute_forces_gpu(int n, particle_t * particles, int * d_next, int * d_grids, double dim, int num) { // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; particle_t * p = &particles[tid]; int xID = p->x / dim; int yID = p->y / dim; int k = xID * num + yID; p->ax = p->ay = 0; // check self compute_self_grid_forces(tid, particles, d_next, d_grids[k]); // check other if(xID > 0) { compute_grid_forces(tid, particles, d_next, d_grids[k - num]); if(yID > 0) compute_grid_forces(tid, particles, d_next, d_grids[k - num - 1]); if(yID < num - 1) compute_grid_forces(tid, particles, d_next, d_grids[k - num + 1]); } if(xID < num - 1) { compute_grid_forces(tid, particles, d_next, d_grids[k + num]); if(yID > 0) compute_grid_forces(tid, particles, d_next, d_grids[k + num - 1]); if(yID < num - 1) compute_grid_forces(tid, particles, d_next, d_grids[k + num + 1]); } if(yID > 0) compute_grid_forces(tid, particles, d_next, d_grids[k - 1]); if(yID < num - 1) compute_grid_forces(tid, particles, d_next, d_grids[k + 1]); } __global__ void compute_grid_forces_gpu(particle_t * particles, int * d_next,int tot_num, int * d_grids, double dim, int num) { // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= tot_num) return; int xID = tid / num; int yID = tid % num; int k = tid; for(int 
i = d_grids[tid]; i != -1; i = d_next[i]) { particle_t * p = &particles[i]; p->ax = p->ay = 0; // check self compute_self_grid_forces(i, particles, d_next, d_grids[k]); // check other if(xID > 0) { compute_grid_forces(i, particles, d_next, d_grids[k - num]); if(yID > 0) compute_grid_forces(i, particles, d_next, d_grids[k - num - 1]); if(yID < num - 1) compute_grid_forces(i, particles, d_next, d_grids[k - num + 1]); } if(xID < num - 1) { compute_grid_forces(i, particles, d_next, d_grids[k + num]); if(yID > 0) compute_grid_forces(i, particles, d_next, d_grids[k + num - 1]); if(yID < num - 1) compute_grid_forces(i, particles, d_next, d_grids[k + num + 1]); } if(yID > 0) compute_grid_forces(i, particles, d_next, d_grids[k - 1]); if(yID < num - 1) compute_grid_forces(i, particles, d_next, d_grids[k + 1]); } } __global__ void move_gpu (particle_t * particles, int n, double size) { // Get thread (particle) ID int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid >= n) return; particle_t * p = &particles[tid]; // // slightly simplified Velocity Verlet integration // conserves energy better than explicit Euler method // p->vx += p->ax * dt; p->vy += p->ay * dt; p->x += p->vx * dt; p->y += p->vy * dt; // // bounce from walls // while( p->x < 0 || p->x > size ) { p->x = p->x < 0 ? -(p->x) : 2*size-p->x; p->vx = -(p->vx); } while( p->y < 0 || p->y > size ) { p->y = p->y < 0 ? -(p->y) : 2*size-p->y; p->vy = -(p->vy); } } int main( int argc, char **argv ) { // This takes a few seconds to initialize the runtime cudaThreadSynchronize(); if( find_option( argc, argv, "-h" ) >= 0 ) { printf( "Options:\n" ); printf( "-h to see this help\n" ); printf( "-n <int> to set the number of particles\n" ); printf( "-o <filename> to specify the output file name\n" ); return 0; } int n = read_int( argc, argv, "-n", 1000 ); char *savename = read_string( argc, argv, "-o", NULL ); FILE *fsave = savename ? 
fopen( savename, "w" ) : NULL; particle_t *particles = (particle_t*) malloc( n * sizeof(particle_t) ); // GPU particle data structure particle_t * d_particles; cudaMalloc((void **) &d_particles, n * sizeof(particle_t)); set_size( n ); init_particles( n, particles ); cudaThreadSynchronize(); double copy_time = read_timer( ); // Copy the particles to the GPU cudaMemcpy(d_particles, particles, n * sizeof(particle_t), cudaMemcpyHostToDevice); cudaThreadSynchronize(); copy_time = read_timer( ) - copy_time; // set up grids double set_grid_time = read_timer(); num = (int)ceil(size*1.0 / DIM); // we get the num of the grid for one directions tot_num = num * num; // total number of grids dim = size/num; // the acutal size of a subgrid int blks = (n + NUM_THREADS - 1) / NUM_THREADS; int g_blks = (tot_num + NUM_THREADS - 1) / NUM_THREADS; int * d_grids; cudaMalloc((void **) &d_grids, tot_num * sizeof(int)); int * d_next; cudaMalloc((void **) &d_next, n * sizeof(int)); cudaThreadSynchronize(); clear_grids <<< g_blks, NUM_THREADS >>> (tot_num, d_grids); assign_particles <<< blks, NUM_THREADS >>> (n, d_particles, d_next, d_grids, dim, num); cudaThreadSynchronize(); set_grid_time = read_timer() - set_grid_time; // // simulate a number of time steps // cudaThreadSynchronize(); double simulation_time = read_timer( ); for( int step = 0; step < NSTEPS; step++ ) { // // compute forces // compute_forces_gpu <<< blks, NUM_THREADS >>> (n, d_particles, d_next, d_grids, dim, num); //compute_grid_forces_gpu <<< g_blks, NUM_THREADS >>> (d_particles, d_next, tot_num, d_grids, dim, num); // // move particles // move_gpu <<< blks, NUM_THREADS >>> (d_particles, n, size); // Re-locate all the particles to grids clear_grids <<< g_blks, NUM_THREADS >>> (tot_num, d_grids); assign_particles <<< blks, NUM_THREADS >>> (n, d_particles, d_next, d_grids, dim, num); // // save if necessary // if( fsave && (step%SAVEFREQ) == 0 ) { // Copy the particles back to the CPU cudaMemcpy(particles, d_particles, n * 
sizeof(particle_t), cudaMemcpyDeviceToHost); save( fsave, n, particles); } } cudaThreadSynchronize(); simulation_time = read_timer( ) - simulation_time; printf( "CPU-GPU copy time = %g seconds\n", copy_time); printf( "n = %d, simulation time = %g seconds\n", n, simulation_time ); free( particles ); cudaFree(d_particles); cudaFree(d_grids); cudaFree(d_next); if( fsave ) fclose( fsave ); return 0; }
500784cf76a53f4f6a309ef60ac5a74506c11598.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaSelectedBlock.hh" #include "kernelfuncs.h" #include "kerneltemplate.hh" #include <thrust/device_vector.h> #include <thrust/sort.h> cudaSelectedBlock::~cudaSelectedBlock() { if (selectedBlock!=NULL) hipFree(selectedBlock); } void cudaSelectedBlock::setupCutoffBlock(real rmax, bool periodic) { cudaCutoffBlock::setupCutoffBlock(rmax, periodic); hipMalloc((void **)&selectedBlock, sizeof(uint32_t)*totalNumBlock); if (withInfo) ErrorInfo("malloc selectedBlock[] on GPU"); } void cudaSelectedBlock::selectBlocks(void) { hipLaunchKernelGGL(( clearArray), dim3(MPnum), dim3(THnum1D), 0, 0, selectedBlock, totalNumBlock); //dim3 _thnum; //_thnum.x = THnum2D; _thnum.y = THnum2D; _thnum.z = 1; hipLaunchKernelGGL(( checkBlocks), dim3(MPnum), dim3(THnum1D), 0, 0, selectedBlock, pid, move, bindex, totalNumBlock); if (withInfo) ErrorInfo("checkBlocks"); hipLaunchKernelGGL(( accumulate_F4), dim3(1), dim3(threadsMax), sizeof(uint32_t)*threadsMax, 0, selectedBlock, totalNumBlock, tmp3N); real R=0.0; hipMemcpy(&R, tmp3N, sizeof(real), hipMemcpyDeviceToHost); numSelected = static_cast<long>(R); myBlockSelected = static_cast<int>( numSelected * (static_cast<real>(myBlockNum) / totalNumBlock)+0.5); if (withInfo) ErrorInfo("accumulate"); hipLaunchKernelGGL(( writeBlockID), dim3(MPnum), dim3(THnum1D), 0, 0, selectedBlock, totalNumBlock); if (withInfo) ErrorInfo("writeBlockID"); // sort by Thrust thrust::device_ptr<uint32_t> dev_ptr(selectedBlock); thrust::sort(dev_ptr, &(dev_ptr[totalNumBlock])); if (withInfo) ErrorInfo("thrrust::sort"); } void cudaSelectedBlock::getForceSelected(const ExchangeMode typeID) { assert(typeID!=ExchangeMode::torque); assert(typeID!=ExchangeMode::density); const size_t pstart = (typeID==ExchangeMode::coordnumber) ? p1 : p3; const size_t pend = (typeID==ExchangeMode::coordnumber) ? 
p2 : p4; const size_t sizeN = sizeof(float4) * (pend-pstart); float4 *tgt = (typeID==ExchangeMode::acceleration) ? a : F; pthread_mutex_lock(&mutTMP); hipMemcpy(&(TMP[pstart]), &(tgt[pstart]), sizeN, hipMemcpyDeviceToHost); if (typeID!=ExchangeMode::coordnumber) { } pthread_mutex_unlock(&mutTMP); static const std::string reason[] = {"Force", "Acceleration", "Torque", "coordination number"}; if (withInfo) ErrorInfo("do getForceSelected:"+reason[static_cast<int>(typeID)]); } void cudaSelectedBlock::importForceSelected(const cudaSelectedBlock &A, const ExchangeMode typeID, bool directAccess, int idMe, int idPeer) { const size_t pstart = (typeID==ExchangeMode::coordnumber) ? A.p1 : A.p3; const size_t pend = (typeID==ExchangeMode::coordnumber) ? A.p2 : A.p4; const size_t sizeN = sizeof(float4) * (pend-pstart); float4 *tgt = (typeID==ExchangeMode::acceleration) ? a : F; if (directAccess) { float4 *src = (typeID==ExchangeMode::acceleration) ? A.a : A.F; hipMemcpyPeer(&(tgt[pstart]), idMe, &(src[pstart]), idPeer, sizeN); hipDeviceSynchronize(); } else { hipMemcpy(&(tgt[pstart]), &(A.TMP[pstart]), sizeN, hipMemcpyHostToDevice); } static const std::string reason[] = {"Force", "Acceleration", "Torque", "coordination number"}; if (withInfo) ErrorInfo("import the forces:"+reason[static_cast<int>(typeID)]); } void cudaSelectedBlock::setSelectedRange(uint32_t blockNum, uint32_t N, uint32_t myID) { calcBlockRange(blockNum, N, myID, [&](uint32_t offset, uint32_t num) { myOffsetSelected = offset; myBlockSelected = num; }); /* std::cerr << "[" << myID << ":" << myOffsetSelected << ":" << myBlockSelected << "]"; std::cerr << " "; */ }
500784cf76a53f4f6a309ef60ac5a74506c11598.cu
#include "cudaSelectedBlock.hh" #include "kernelfuncs.h" #include "kerneltemplate.hh" #include <thrust/device_vector.h> #include <thrust/sort.h> cudaSelectedBlock::~cudaSelectedBlock() { if (selectedBlock!=NULL) cudaFree(selectedBlock); } void cudaSelectedBlock::setupCutoffBlock(real rmax, bool periodic) { cudaCutoffBlock::setupCutoffBlock(rmax, periodic); cudaMalloc((void **)&selectedBlock, sizeof(uint32_t)*totalNumBlock); if (withInfo) ErrorInfo("malloc selectedBlock[] on GPU"); } void cudaSelectedBlock::selectBlocks(void) { clearArray<<<MPnum, THnum1D>>>(selectedBlock, totalNumBlock); //dim3 _thnum; //_thnum.x = THnum2D; _thnum.y = THnum2D; _thnum.z = 1; checkBlocks<<<MPnum, THnum1D>>>(selectedBlock, pid, move, bindex, totalNumBlock); if (withInfo) ErrorInfo("checkBlocks"); accumulate_F4<<<1, threadsMax, sizeof(uint32_t)*threadsMax>>>( selectedBlock, totalNumBlock, tmp3N); real R=0.0; cudaMemcpy(&R, tmp3N, sizeof(real), cudaMemcpyDeviceToHost); numSelected = static_cast<long>(R); myBlockSelected = static_cast<int>( numSelected * (static_cast<real>(myBlockNum) / totalNumBlock)+0.5); if (withInfo) ErrorInfo("accumulate"); writeBlockID<<<MPnum, THnum1D>>>(selectedBlock, totalNumBlock); if (withInfo) ErrorInfo("writeBlockID"); // sort by Thrust thrust::device_ptr<uint32_t> dev_ptr(selectedBlock); thrust::sort(dev_ptr, &(dev_ptr[totalNumBlock])); if (withInfo) ErrorInfo("thrrust::sort"); } void cudaSelectedBlock::getForceSelected(const ExchangeMode typeID) { assert(typeID!=ExchangeMode::torque); assert(typeID!=ExchangeMode::density); const size_t pstart = (typeID==ExchangeMode::coordnumber) ? p1 : p3; const size_t pend = (typeID==ExchangeMode::coordnumber) ? p2 : p4; const size_t sizeN = sizeof(float4) * (pend-pstart); float4 *tgt = (typeID==ExchangeMode::acceleration) ? 
a : F; pthread_mutex_lock(&mutTMP); cudaMemcpy(&(TMP[pstart]), &(tgt[pstart]), sizeN, cudaMemcpyDeviceToHost); if (typeID!=ExchangeMode::coordnumber) { } pthread_mutex_unlock(&mutTMP); static const std::string reason[] = {"Force", "Acceleration", "Torque", "coordination number"}; if (withInfo) ErrorInfo("do getForceSelected:"+reason[static_cast<int>(typeID)]); } void cudaSelectedBlock::importForceSelected(const cudaSelectedBlock &A, const ExchangeMode typeID, bool directAccess, int idMe, int idPeer) { const size_t pstart = (typeID==ExchangeMode::coordnumber) ? A.p1 : A.p3; const size_t pend = (typeID==ExchangeMode::coordnumber) ? A.p2 : A.p4; const size_t sizeN = sizeof(float4) * (pend-pstart); float4 *tgt = (typeID==ExchangeMode::acceleration) ? a : F; if (directAccess) { float4 *src = (typeID==ExchangeMode::acceleration) ? A.a : A.F; cudaMemcpyPeer(&(tgt[pstart]), idMe, &(src[pstart]), idPeer, sizeN); cudaDeviceSynchronize(); } else { cudaMemcpy(&(tgt[pstart]), &(A.TMP[pstart]), sizeN, cudaMemcpyHostToDevice); } static const std::string reason[] = {"Force", "Acceleration", "Torque", "coordination number"}; if (withInfo) ErrorInfo("import the forces:"+reason[static_cast<int>(typeID)]); } void cudaSelectedBlock::setSelectedRange(uint32_t blockNum, uint32_t N, uint32_t myID) { calcBlockRange(blockNum, N, myID, [&](uint32_t offset, uint32_t num) { myOffsetSelected = offset; myBlockSelected = num; }); /* std::cerr << "[" << myID << ":" << myOffsetSelected << ":" << myBlockSelected << "]"; std::cerr << " "; */ }
ef462d8a2a00ac6426eef3a271bae8917e8d2e71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Architektura procesoru (ACH 2017) * Projekt c. 2 (cuda) * Login: xsumsa01 */ #include <sys/time.h> #include <cstdio> #include <cmath> #include "nbody.h" #define NF(x) (N * sizeof(float)) /** * @brief Allocate memory on CPU * * @param t Data type of the allocated memory * @param x Destination pointer * @param s Size of the allocated memory */ #define CPU_ALLOC(t, x, s) \ do { \ if(hipHostMalloc(&x, sizeof(t) * (s)) != hipSuccess) { \ hipError_t e = hipGetLastError(); \ fprintf(stderr, "hipHostMalloc() failed: %s\n", hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ hipMemset(x, 0, N * sizeof(*x)); \ } while(0) #define CPU_FREE(x) \ do { \ hipHostFree(x); \ x = NULL; \ } while(0) #define GPU_ALLOC(t, x, s) \ do { \ if(hipMalloc(&x, sizeof(t) * (s)) != hipSuccess) { \ hipError_t e = hipGetLastError(); \ fprintf(stderr, "hipMalloc() failed: %s\n", hipGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ hipMemset(x, 0, N * sizeof(*x)); \ } while(0) #define GPU_FREE(x) \ do { \ hipFree(x); \ x = NULL; \ } while(0) int main(int argc, char **argv) { FILE *fp; struct timeval t1, t2; int N; float dt; int steps; int thr_blc; // parametry if (argc != 7) { printf("Usage: nbody <N> <dt> <steps> <thr/blc> <input> <output>\n"); exit(1); } N = atoi(argv[1]); dt = atof(argv[2]); steps = atoi(argv[3]); thr_blc = atoi(argv[4]); printf("N: %d\n", N); printf("dt: %f\n", dt); printf("steps: %d\n", steps); printf("threads/block: %d\n", thr_blc); // alokace pameti na CPU t_particles particles_cpu; CPU_ALLOC(float, particles_cpu.pos_x, N); CPU_ALLOC(float, particles_cpu.pos_y, N); CPU_ALLOC(float, particles_cpu.pos_z, N); CPU_ALLOC(float, particles_cpu.vel_x, N); CPU_ALLOC(float, particles_cpu.vel_y, N); CPU_ALLOC(float, particles_cpu.vel_z, N); CPU_ALLOC(float, particles_cpu.weight, N); // nacteni castic ze souboru fp = fopen(argv[5], "r"); if (fp == NULL) { printf("Can't open file %s!\n", argv[5]); 
exit(1); } particles_read(fp, particles_cpu, N); fclose(fp); t_particles particles_gpu; t_velocities tmp_velocities_gpu; /* DOPLNTE: alokaci pameti na GPU */ GPU_ALLOC(float, particles_gpu.pos_x, N); GPU_ALLOC(float, particles_gpu.pos_y, N); GPU_ALLOC(float, particles_gpu.pos_z, N); GPU_ALLOC(float, particles_gpu.vel_x, N); GPU_ALLOC(float, particles_gpu.vel_y, N); GPU_ALLOC(float, particles_gpu.vel_z, N); GPU_ALLOC(float, particles_gpu.weight, N); GPU_ALLOC(float, tmp_velocities_gpu.x, N); GPU_ALLOC(float, tmp_velocities_gpu.y, N); GPU_ALLOC(float, tmp_velocities_gpu.z, N); hipMemcpy(particles_gpu.pos_x, particles_cpu.pos_x, NF(N), hipMemcpyHostToDevice); hipMemcpy(particles_gpu.pos_y, particles_cpu.pos_y, NF(N), hipMemcpyHostToDevice); hipMemcpy(particles_gpu.pos_z, particles_cpu.pos_z, NF(N), hipMemcpyHostToDevice); hipMemcpy(particles_gpu.vel_x, particles_cpu.vel_x, NF(N), hipMemcpyHostToDevice); hipMemcpy(particles_gpu.vel_y, particles_cpu.vel_y, NF(N), hipMemcpyHostToDevice); hipMemcpy(particles_gpu.vel_z, particles_cpu.vel_z, NF(N), hipMemcpyHostToDevice); hipMemcpy(particles_gpu.weight, particles_cpu.weight, NF(N), hipMemcpyHostToDevice); hipError_t err = hipGetLastError(); if(err != hipSuccess) { fprintf(stderr, "hipMemcpy(): %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } size_t grid = (N / thr_blc) + 1; //(N + thr_blc- 1) / thr_blc; // vypocet gettimeofday(&t1, 0); for (int s = 0; s < steps; ++s) { hipLaunchKernelGGL(( calculate_gravitation_velocity), dim3(grid), dim3(thr_blc), 0, 0, particles_gpu, tmp_velocities_gpu, N, dt); hipLaunchKernelGGL(( calculate_collision_velocity), dim3(grid), dim3(thr_blc), 0, 0, particles_gpu, tmp_velocities_gpu, N, dt); hipLaunchKernelGGL(( update_particle), dim3(grid), dim3(thr_blc), 0, 0, particles_gpu, tmp_velocities_gpu, N, dt); hipMemset(tmp_velocities_gpu.x, 0, NF(N)); hipMemset(tmp_velocities_gpu.y, 0, NF(N)); hipMemset(tmp_velocities_gpu.z, 0, NF(N)); } hipDeviceSynchronize(); gettimeofday(&t2, 0); // cas 
double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Time: %f s\n", t); hipMemcpy(particles_cpu.pos_x, particles_gpu.pos_x, NF(N), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.pos_y, particles_gpu.pos_y, NF(N), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.pos_z, particles_gpu.pos_z, NF(N), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.vel_x, particles_gpu.vel_x, NF(N), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.vel_y, particles_gpu.vel_y, NF(N), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.vel_z, particles_gpu.vel_z, NF(N), hipMemcpyDeviceToHost); hipMemcpy(particles_cpu.weight, particles_gpu.weight, NF(N), hipMemcpyDeviceToHost); // ulozeni castic do souboru fp = fopen(argv[6], "w"); if (fp == NULL) { printf("Can't open file %s!\n", argv[6]); exit(1); } particles_write(fp, particles_cpu, N); fclose(fp); // Cleanup CPU_FREE(particles_cpu.pos_x); CPU_FREE(particles_cpu.pos_y); CPU_FREE(particles_cpu.pos_z); CPU_FREE(particles_cpu.vel_x); CPU_FREE(particles_cpu.vel_y); CPU_FREE(particles_cpu.vel_z); CPU_FREE(particles_cpu.weight); GPU_FREE(particles_gpu.pos_x); GPU_FREE(particles_gpu.pos_y); GPU_FREE(particles_gpu.pos_z); GPU_FREE(particles_gpu.vel_x); GPU_FREE(particles_gpu.vel_y); GPU_FREE(particles_gpu.vel_z); GPU_FREE(particles_gpu.weight); GPU_FREE(tmp_velocities_gpu.x); GPU_FREE(tmp_velocities_gpu.y); GPU_FREE(tmp_velocities_gpu.z); return 0; }
ef462d8a2a00ac6426eef3a271bae8917e8d2e71.cu
/* * Architektura procesoru (ACH 2017) * Projekt c. 2 (cuda) * Login: xsumsa01 */ #include <sys/time.h> #include <cstdio> #include <cmath> #include "nbody.h" #define NF(x) (N * sizeof(float)) /** * @brief Allocate memory on CPU * * @param t Data type of the allocated memory * @param x Destination pointer * @param s Size of the allocated memory */ #define CPU_ALLOC(t, x, s) \ do { \ if(cudaMallocHost(&x, sizeof(t) * (s)) != cudaSuccess) { \ cudaError e = cudaGetLastError(); \ fprintf(stderr, "cudaMallocHost() failed: %s\n", cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ cudaMemset(x, 0, N * sizeof(*x)); \ } while(0) #define CPU_FREE(x) \ do { \ cudaFreeHost(x); \ x = NULL; \ } while(0) #define GPU_ALLOC(t, x, s) \ do { \ if(cudaMalloc(&x, sizeof(t) * (s)) != cudaSuccess) { \ cudaError e = cudaGetLastError(); \ fprintf(stderr, "cudaMalloc() failed: %s\n", cudaGetErrorString(e)); \ exit(EXIT_FAILURE); \ } \ cudaMemset(x, 0, N * sizeof(*x)); \ } while(0) #define GPU_FREE(x) \ do { \ cudaFree(x); \ x = NULL; \ } while(0) int main(int argc, char **argv) { FILE *fp; struct timeval t1, t2; int N; float dt; int steps; int thr_blc; // parametry if (argc != 7) { printf("Usage: nbody <N> <dt> <steps> <thr/blc> <input> <output>\n"); exit(1); } N = atoi(argv[1]); dt = atof(argv[2]); steps = atoi(argv[3]); thr_blc = atoi(argv[4]); printf("N: %d\n", N); printf("dt: %f\n", dt); printf("steps: %d\n", steps); printf("threads/block: %d\n", thr_blc); // alokace pameti na CPU t_particles particles_cpu; CPU_ALLOC(float, particles_cpu.pos_x, N); CPU_ALLOC(float, particles_cpu.pos_y, N); CPU_ALLOC(float, particles_cpu.pos_z, N); CPU_ALLOC(float, particles_cpu.vel_x, N); CPU_ALLOC(float, particles_cpu.vel_y, N); CPU_ALLOC(float, particles_cpu.vel_z, N); CPU_ALLOC(float, particles_cpu.weight, N); // nacteni castic ze souboru fp = fopen(argv[5], "r"); if (fp == NULL) { printf("Can't open file %s!\n", argv[5]); exit(1); } particles_read(fp, particles_cpu, N); fclose(fp); t_particles 
particles_gpu; t_velocities tmp_velocities_gpu; /* DOPLNTE: alokaci pameti na GPU */ GPU_ALLOC(float, particles_gpu.pos_x, N); GPU_ALLOC(float, particles_gpu.pos_y, N); GPU_ALLOC(float, particles_gpu.pos_z, N); GPU_ALLOC(float, particles_gpu.vel_x, N); GPU_ALLOC(float, particles_gpu.vel_y, N); GPU_ALLOC(float, particles_gpu.vel_z, N); GPU_ALLOC(float, particles_gpu.weight, N); GPU_ALLOC(float, tmp_velocities_gpu.x, N); GPU_ALLOC(float, tmp_velocities_gpu.y, N); GPU_ALLOC(float, tmp_velocities_gpu.z, N); cudaMemcpy(particles_gpu.pos_x, particles_cpu.pos_x, NF(N), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpu.pos_y, particles_cpu.pos_y, NF(N), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpu.pos_z, particles_cpu.pos_z, NF(N), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpu.vel_x, particles_cpu.vel_x, NF(N), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpu.vel_y, particles_cpu.vel_y, NF(N), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpu.vel_z, particles_cpu.vel_z, NF(N), cudaMemcpyHostToDevice); cudaMemcpy(particles_gpu.weight, particles_cpu.weight, NF(N), cudaMemcpyHostToDevice); cudaError err = cudaGetLastError(); if(err != cudaSuccess) { fprintf(stderr, "cudaMemcpy(): %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } size_t grid = (N / thr_blc) + 1; //(N + thr_blc- 1) / thr_blc; // vypocet gettimeofday(&t1, 0); for (int s = 0; s < steps; ++s) { calculate_gravitation_velocity<<<grid, thr_blc>>>(particles_gpu, tmp_velocities_gpu, N, dt); calculate_collision_velocity<<<grid, thr_blc>>>(particles_gpu, tmp_velocities_gpu, N, dt); update_particle<<<grid, thr_blc>>>(particles_gpu, tmp_velocities_gpu, N, dt); cudaMemset(tmp_velocities_gpu.x, 0, NF(N)); cudaMemset(tmp_velocities_gpu.y, 0, NF(N)); cudaMemset(tmp_velocities_gpu.z, 0, NF(N)); } cudaDeviceSynchronize(); gettimeofday(&t2, 0); // cas double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Time: %f s\n", t); cudaMemcpy(particles_cpu.pos_x, 
particles_gpu.pos_x, NF(N), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.pos_y, particles_gpu.pos_y, NF(N), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.pos_z, particles_gpu.pos_z, NF(N), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.vel_x, particles_gpu.vel_x, NF(N), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.vel_y, particles_gpu.vel_y, NF(N), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.vel_z, particles_gpu.vel_z, NF(N), cudaMemcpyDeviceToHost); cudaMemcpy(particles_cpu.weight, particles_gpu.weight, NF(N), cudaMemcpyDeviceToHost); // ulozeni castic do souboru fp = fopen(argv[6], "w"); if (fp == NULL) { printf("Can't open file %s!\n", argv[6]); exit(1); } particles_write(fp, particles_cpu, N); fclose(fp); // Cleanup CPU_FREE(particles_cpu.pos_x); CPU_FREE(particles_cpu.pos_y); CPU_FREE(particles_cpu.pos_z); CPU_FREE(particles_cpu.vel_x); CPU_FREE(particles_cpu.vel_y); CPU_FREE(particles_cpu.vel_z); CPU_FREE(particles_cpu.weight); GPU_FREE(particles_gpu.pos_x); GPU_FREE(particles_gpu.pos_y); GPU_FREE(particles_gpu.pos_z); GPU_FREE(particles_gpu.vel_x); GPU_FREE(particles_gpu.vel_y); GPU_FREE(particles_gpu.vel_z); GPU_FREE(particles_gpu.weight); GPU_FREE(tmp_velocities_gpu.x); GPU_FREE(tmp_velocities_gpu.y); GPU_FREE(tmp_velocities_gpu.z); return 0; }
e08105dcd31354536def3ff00e106c9ddfe7958a.hip
// !!! This is a file automatically generated by hipify!!! //Note: //======= Cara compile ======= //nvcc nama_file.cu -o nama_file_output -gencode arch=compute_serinya,code=sm_serinya --default-stream per-thread //======= Cara running program ======= //./nama_file mode besar_matrix besar_grid besar_block //Ukuran matrix: besar_matrix x besar matrix // besar_grid max = 65535 (Max grid.y adalah 65535) // besar_block max = 32, (32 x 32 = 1024) //Grid: besar_grid x besar_grid (block per grid) | Max: Mengacu pada NVIDIA Compute Capability dari setiap seri GPU //Block: besar_block x besar_block (thread per block) | Max: 1024, mengacu pada NVIDIA Compute Capability dari setiap seri GPU // mode 2 ketas belum selesai dikerjakan // Mode: // 0: Matrix multiplication pada 1 GPU tanpa melihat hasil sekuensial // 1: Matrix multiplication pada 1 GPU dengan hasil sekuensial // 2: Matrix multiplication pada multiple GPU tanpa melihat hasil sekuensial // 3: Matrix multiplication pada multiple GPU dengan hasil sekuensial #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <time.h> //#include <helper_cuda.h> #include <hip/hip_runtime.h> //#include <helper_functions.h> #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__);} inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){ if(code != hipSuccess){ fprintf(stderr, "Error: %s %s %d\n", hipGetErrorString(code), file, line); if(abort) exit(code); } } //Operasi perkalian matrix pada gpu __global__ void mm_gpu(float *gpu_matrixA, float *gpu_matrixB, float *gpu_result, int matrix_size, int grid, int block){ int l, m, n, R, displacement; if(matrix_size > (grid * block)) displacement = matrix_size/(grid * block); else displacement = 1; int row_index = blockIdx.y * blockDim.y + threadIdx.y; int col_index = blockIdx.x * blockDim.x + threadIdx.x; if(row_index < matrix_size && col_index < matrix_size){ for(m = 0; m < displacement; m++){ for(n = 0; n < displacement; n++){ R = 0; for(l = 
0; l < matrix_size; l++){ float A = gpu_matrixA[(row_index * displacement + m) * matrix_size + l]; float B = gpu_matrixB[l * matrix_size + (col_index * displacement + n)]; R += A * B; } gpu_result[(row_index * displacement + m) * matrix_size + (col_index * displacement + n)] = R; } } } } __global__ void mm_multigpu(float *gpu_matrixA, float *gpu_matrixB, float *gpu_result, int device_count, int device_index, int matrix_size, int grid, int block){ int l, m, n, R, row_disp, col_disp; int data_split = matrix_size/device_count; int row_index = blockIdx.y * blockDim.y + threadIdx.y; int col_index = blockIdx.x * blockDim.x + threadIdx.x; if(data_split * device_count < matrix_size && device_count == device_index + 1) data_split += matrix_size - (data_split * device_count); if(data_split > (grid * block)){ row_disp = data_split/(grid * block); //if(row_disp * (grid * block) < data_split && row_index == data_split) // row_disp += data_split - (row_disp * grid * block); }else row_disp = 1; if(matrix_size > (grid * block)){ col_disp = matrix_size / (grid * block); //if(col_disp * grid * block < matrix_size && col_index == (grid * block)) // col_disp += matrix_size - col_disp * grid * block; }else col_disp = 1; if(col_index < matrix_size && row_index < data_split){ for(m = 0; m < row_disp; m++){ for(n = 0; n < col_disp; n++){ R = 0; for(l = 0; l < matrix_size; l++){ float A = gpu_matrixA[(row_index * row_disp + m) * matrix_size + l]; float B = gpu_matrixB[l * matrix_size + (col_index * col_disp + n)]; R += A * B; } gpu_result[(row_index * row_disp + m) * matrix_size + (col_index * col_disp + n)] = R; } } } } int main(int argc, char** argv){ srand(time(NULL)); double runtime; struct timespec begin, end; // Inisialisasi parameter dari user input int mode = atoi(argv[1]); int matrix_size = atoi(argv[2]); int igrid = atoi(argv[3]); int iblock = atoi(argv[4]); //Debug print variabel user input //printf("Mode: %d\n", mode); //printf("Size %d x %d\n", matrix_size, matrix_size); 
//printf("Grid: %d\n", igrid); //printf("Block:%d\n", iblock); // Inisailiasai pada Host //int matrixallsize = matrix_size * matrix_size; int matrixBytes = (matrix_size * matrix_size) * sizeof(float); int i, j, k; float *matrixA, *matrixB, *result; int device_count; hipGetDeviceCount(&device_count); //printf("Device: %d\n", device_count); float *gpu_matrixA[device_count], *gpu_matrixB[device_count], *gpu_result[device_count]; //Inisialisasi pada GPU dim3 grid(igrid, igrid); dim3 block(iblock, iblock); //printf("Dim3 Block: {%d, %d, %d}\n", block.x, block.y, block.z); //Operasi dengan 1 GPU if(mode < 2){ //float *gpu_matrixA, *gpu_matrixB, *gpu_result; matrixA = (float *)malloc(matrixBytes) ; matrixB = (float *)malloc(matrixBytes); result = (float *)malloc(matrixBytes); //Inisialisasi martrix for(i = 0; i < matrix_size * matrix_size; i++){ matrixA[i] = rand() % 99 + 1; matrixB[i] = rand() % 99 + 1; } clock_gettime(CLOCK_REALTIME, &begin); //Mulai operasi pada device checkCudaErrors(hipMalloc((void **) &gpu_matrixA[0], matrixBytes)); checkCudaErrors(hipMalloc((void **) &gpu_matrixB[0], matrixBytes)); checkCudaErrors(hipMalloc((void **) &gpu_result[0], matrixBytes)); checkCudaErrors(hipMemcpy(gpu_matrixA[0], matrixA, matrixBytes, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(gpu_matrixB[0], matrixB, matrixBytes, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( mm_gpu), dim3(grid), dim3(block), 0, 0, gpu_matrixA[0], gpu_matrixB[0], gpu_result[0], matrix_size, igrid, iblock); hipError_t error_kernel; error_kernel = hipGetLastError(); if(error_kernel != hipSuccess) printf("Error: %s\n", hipGetErrorString(error_kernel)); //Return hasil perkalian checkCudaErrors(hipMemcpy(result, gpu_result[0], matrixBytes, hipMemcpyDeviceToHost)); //hipDeviceSynchronize(); clock_gettime(CLOCK_REALTIME, &end); runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0; printf("Running Time: %f\n\n", runtime); }else{ //Operasi pada multiple GPU //Check Device 
checkCudaErrors(hipHostMalloc((void**) &matrixA, matrixBytes)); checkCudaErrors(hipHostMalloc((void**) &matrixB, matrixBytes)); checkCudaErrors(hipHostMalloc((void**) &result, matrixBytes)); //Inisialisasi martrix for(i = 0; i < matrix_size * matrix_size; i++){ matrixA[i] = rand() % 99 + 1; matrixB[i] = rand() % 99 + 1; } clock_gettime(CLOCK_REALTIME, &begin); int start_p, chunk_size = (matrix_size/device_count); int chunkBytes; int rem_size; if((chunk_size * device_count) != matrix_size) rem_size = matrix_size - (chunk_size * device_count); else rem_size = 0; printf("chunk size: %d\n", chunk_size); printf("remaining size: %d\n", rem_size); //Inisialisasi memori pada tiap gpu for(i = 0; i < device_count; i++){ checkCudaErrors(hipSetDevice(i)); if(i == (device_count - 1)) chunkBytes = ((chunk_size + rem_size) * matrix_size) * sizeof(float); else chunkBytes = (chunk_size * matrix_size) * sizeof(float); checkCudaErrors(hipMalloc((void **) &gpu_matrixA[i], chunkBytes)); checkCudaErrors(hipMalloc((void **) &gpu_matrixB[i], matrixBytes)); checkCudaErrors(hipMalloc((void **) &gpu_result[i], chunkBytes)); } for(i = 0; i < device_count; i++){ start_p = i * chunk_size; if(i == (device_count - 1)) chunkBytes = ((chunk_size + rem_size) * matrix_size) * sizeof(float); else chunkBytes = (chunk_size * matrix_size) * sizeof(float); checkCudaErrors(hipSetDevice(i)); checkCudaErrors(hipMemcpyAsync(gpu_matrixA[i], &matrixA[start_p], chunkBytes, hipMemcpyHostToDevice)); } for(i = 0; i < device_count; i++){ checkCudaErrors(hipSetDevice(i)); checkCudaErrors(hipMemcpyAsync(gpu_matrixB[i], matrixB, matrixBytes, hipMemcpyHostToDevice)); } for(i = 0; i < device_count; i++){ checkCudaErrors(hipSetDevice(i)); hipLaunchKernelGGL(( mm_multigpu), dim3(grid), dim3(block), 0, 0, gpu_matrixA[i], gpu_matrixB[i], gpu_result[i], device_count, i, matrix_size, igrid, iblock); } for(i = 0; i < device_count; i++){ start_p = i * chunk_size; if(i == (device_count - 1)) chunkBytes = ((chunk_size + rem_size) 
* matrix_size) * sizeof(float); else chunkBytes = (chunk_size * matrix_size) * sizeof(float); checkCudaErrors(hipSetDevice(i)); checkCudaErrors(hipMemcpyAsync(&result[start_p], gpu_result[i], chunkBytes, hipMemcpyDeviceToHost)); } for(i = 0; i < device_count; i++){ checkCudaErrors(hipSetDevice(i)); hipDeviceSynchronize(); } hipError_t error_kernel; error_kernel = hipGetLastError(); if(error_kernel != hipSuccess) printf("Error: %s\n", hipGetErrorString(error_kernel)); clock_gettime(CLOCK_REALTIME, &end); runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0; printf("Running Time: %f\n\n", runtime); } //Operasi sekuensial if(mode == 1 || mode == 3){ int right_answer = 0; float *seqresult = (float *)malloc(matrixBytes); for (i = 0; i < matrix_size; i++){ for (j = 0; j < matrix_size; j++){ seqresult[i * matrix_size + j] = 0; for (k = 0; k < matrix_size; k++) seqresult[i * matrix_size + j] += matrixA[i * matrix_size + k] * matrixB[k * matrix_size + j]; if(seqresult[i * matrix_size + j] == result[i * matrix_size + j]) right_answer += 1; //printf("%d - %d S: %f, CUDA: %f\n", i * matrix_size, j, seqresult[i * matrix_size + j], result[i * matrix_size + j]); } } if(right_answer == (matrix_size * matrix_size)) printf("The answer is matched.\n"); free(seqresult); } //Membebaskan Host if(mode < 2){ hipFree(gpu_matrixA[0]); hipFree(gpu_matrixB[0]); hipFree(gpu_result[0]); free(matrixA); free(matrixB); free(result); }else{ for(i = 0; i < device_count; i++){ hipFree(gpu_matrixA[i]); hipFree(gpu_matrixB[i]); hipFree(gpu_result[i]); } hipHostFree(matrixA); hipHostFree(matrixB); hipHostFree(result); } hipDeviceReset(); return 0; }
e08105dcd31354536def3ff00e106c9ddfe7958a.cu
// Note:
// ======= Compiling =======
// nvcc file.cu -o output -gencode arch=compute_XX,code=sm_XX --default-stream per-thread
// ======= Running =======
// ./program mode matrix_size grid_size block_size
// Matrix is matrix_size x matrix_size.
//   grid_size  max = 65535 (grid.y hardware limit)
//   block_size max = 32    (32 x 32 = 1024 threads per block)
// Grid:  grid_size x grid_size blocks per grid.
// Block: block_size x block_size threads per block.
// Mode:
//   0: matrix multiplication on 1 GPU, without sequential verification
//   1: matrix multiplication on 1 GPU, with sequential verification
//   2: matrix multiplication on multiple GPUs, without sequential verification
//   3: matrix multiplication on multiple GPUs, with sequential verification
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>

// Abort-on-error wrapper for every CUDA runtime call.
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if(code != cudaSuccess){
        fprintf(stderr, "Error: %s %s %d\n", cudaGetErrorString(code), file, line);
        if(abort) exit(code);
    }
}

// Dense matrix multiply on one GPU: gpu_result = gpu_matrixA * gpu_matrixB
// (row-major, matrix_size x matrix_size).
// Uses grid-stride loops so EVERY output element is computed for any
// matrix_size, including sizes that are not a multiple of grid*block (the
// previous displacement scheme silently skipped the remainder rows/columns).
// `grid` and `block` are kept only for signature compatibility with callers.
__global__ void mm_gpu(float *gpu_matrixA, float *gpu_matrixB, float *gpu_result,
                       int matrix_size, int grid, int block){
    (void)grid; (void)block;
    for(int row = blockIdx.y * blockDim.y + threadIdx.y; row < matrix_size;
            row += gridDim.y * blockDim.y){
        for(int col = blockIdx.x * blockDim.x + threadIdx.x; col < matrix_size;
                col += gridDim.x * blockDim.x){
            // Accumulate in float: the old `int R` truncated float products.
            float R = 0.0f;
            for(int l = 0; l < matrix_size; ++l)
                R += gpu_matrixA[row * matrix_size + l] *
                     gpu_matrixB[l * matrix_size + col];
            gpu_result[row * matrix_size + col] = R;
        }
    }
}

// Multi-GPU variant: device `device_index` holds a horizontal slice of A
// (data_split rows; the last device also absorbs the remainder rows) plus a
// full copy of B, and produces the matching slice of the result.
// `grid` and `block` are kept only for signature compatibility.
__global__ void mm_multigpu(float *gpu_matrixA, float *gpu_matrixB, float *gpu_result,
                            int device_count, int device_index, int matrix_size,
                            int grid, int block){
    (void)grid; (void)block;
    // Rows owned by this device.
    int data_split = matrix_size / device_count;
    if(data_split * device_count < matrix_size && device_count == device_index + 1)
        data_split += matrix_size - (data_split * device_count);
    // Grid-stride over the owned rows and all columns.
    for(int row = blockIdx.y * blockDim.y + threadIdx.y; row < data_split;
            row += gridDim.y * blockDim.y){
        for(int col = blockIdx.x * blockDim.x + threadIdx.x; col < matrix_size;
                col += gridDim.x * blockDim.x){
            float R = 0.0f;  // float accumulator (was int: truncated products)
            for(int l = 0; l < matrix_size; ++l)
                R += gpu_matrixA[row * matrix_size + l] *
                     gpu_matrixB[l * matrix_size + col];
            gpu_result[row * matrix_size + col] = R;
        }
    }
}

int main(int argc, char** argv){
    srand(time(NULL));
    double runtime;
    struct timespec begin, end;

    // User parameters: mode, matrix size, grid side, block side.
    int mode = atoi(argv[1]);
    int matrix_size = atoi(argv[2]);
    int igrid = atoi(argv[3]);
    int iblock = atoi(argv[4]);

    // Host-side bookkeeping.
    int matrixBytes = (matrix_size * matrix_size) * sizeof(float);
    int i, j, k;
    float *matrixA, *matrixB, *result;

    int device_count;
    cudaGetDeviceCount(&device_count);
    // One device-pointer slot per GPU (only slot 0 is used in single-GPU mode).
    float *gpu_matrixA[device_count], *gpu_matrixB[device_count], *gpu_result[device_count];

    dim3 grid(igrid, igrid);
    dim3 block(iblock, iblock);

    if(mode < 2){
        // ---------- Single-GPU path ----------
        matrixA = (float *)malloc(matrixBytes);
        matrixB = (float *)malloc(matrixBytes);
        result  = (float *)malloc(matrixBytes);

        // Fill both operands with random values in [1, 99].
        for(i = 0; i < matrix_size * matrix_size; i++){
            matrixA[i] = rand() % 99 + 1;
            matrixB[i] = rand() % 99 + 1;
        }

        clock_gettime(CLOCK_REALTIME, &begin);

        checkCudaErrors(cudaMalloc((void **) &gpu_matrixA[0], matrixBytes));
        checkCudaErrors(cudaMalloc((void **) &gpu_matrixB[0], matrixBytes));
        checkCudaErrors(cudaMalloc((void **) &gpu_result[0], matrixBytes));
        checkCudaErrors(cudaMemcpy(gpu_matrixA[0], matrixA, matrixBytes, cudaMemcpyHostToDevice));
        checkCudaErrors(cudaMemcpy(gpu_matrixB[0], matrixB, matrixBytes, cudaMemcpyHostToDevice));

        mm_gpu<<<grid, block>>>(gpu_matrixA[0], gpu_matrixB[0], gpu_result[0],
                                matrix_size, igrid, iblock);
        cudaError_t error_kernel = cudaGetLastError();
        if(error_kernel != cudaSuccess)
            printf("Error: %s\n", cudaGetErrorString(error_kernel));

        // Blocking copy-back also synchronizes with the kernel.
        checkCudaErrors(cudaMemcpy(result, gpu_result[0], matrixBytes, cudaMemcpyDeviceToHost));

        clock_gettime(CLOCK_REALTIME, &end);
        runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
        printf("Running Time: %f\n\n", runtime);
    }else{
        // ---------- Multi-GPU path ----------
        // Pinned host memory enables truly asynchronous copies.
        checkCudaErrors(cudaMallocHost((void**) &matrixA, matrixBytes));
        checkCudaErrors(cudaMallocHost((void**) &matrixB, matrixBytes));
        checkCudaErrors(cudaMallocHost((void**) &result, matrixBytes));

        for(i = 0; i < matrix_size * matrix_size; i++){
            matrixA[i] = rand() % 99 + 1;
            matrixB[i] = rand() % 99 + 1;
        }

        clock_gettime(CLOCK_REALTIME, &begin);

        int start_p, chunk_size = (matrix_size/device_count);
        int chunkBytes;
        int rem_size;
        if((chunk_size * device_count) != matrix_size)
            rem_size = matrix_size - (chunk_size * device_count);
        else
            rem_size = 0;
        printf("chunk size: %d\n", chunk_size);
        printf("remaining size: %d\n", rem_size);

        // Allocate each device's slice of A/result plus a full copy of B.
        for(i = 0; i < device_count; i++){
            checkCudaErrors(cudaSetDevice(i));
            if(i == (device_count - 1))
                chunkBytes = ((chunk_size + rem_size) * matrix_size) * sizeof(float);
            else
                chunkBytes = (chunk_size * matrix_size) * sizeof(float);
            checkCudaErrors(cudaMalloc((void **) &gpu_matrixA[i], chunkBytes));
            checkCudaErrors(cudaMalloc((void **) &gpu_matrixB[i], matrixBytes));
            checkCudaErrors(cudaMalloc((void **) &gpu_result[i], chunkBytes));
        }

        // Scatter the row chunks of A. start_p is an ELEMENT offset: chunk i
        // starts at row i*chunk_size, i.e. element i*chunk_size*matrix_size.
        // (The old code offset by only i*chunk_size elements, so every device
        // except device 0 received the wrong rows.)
        for(i = 0; i < device_count; i++){
            start_p = i * chunk_size * matrix_size;
            if(i == (device_count - 1))
                chunkBytes = ((chunk_size + rem_size) * matrix_size) * sizeof(float);
            else
                chunkBytes = (chunk_size * matrix_size) * sizeof(float);
            checkCudaErrors(cudaSetDevice(i));
            checkCudaErrors(cudaMemcpyAsync(gpu_matrixA[i], &matrixA[start_p], chunkBytes, cudaMemcpyHostToDevice));
        }

        // Broadcast the whole of B to every device.
        for(i = 0; i < device_count; i++){
            checkCudaErrors(cudaSetDevice(i));
            checkCudaErrors(cudaMemcpyAsync(gpu_matrixB[i], matrixB, matrixBytes, cudaMemcpyHostToDevice));
        }

        // Launch one kernel per device on its own slice.
        for(i = 0; i < device_count; i++){
            checkCudaErrors(cudaSetDevice(i));
            mm_multigpu<<<grid, block>>>(gpu_matrixA[i], gpu_matrixB[i], gpu_result[i],
                                         device_count, i, matrix_size, igrid, iblock);
        }

        // Gather the result chunks (same element-offset fix as the scatter).
        for(i = 0; i < device_count; i++){
            start_p = i * chunk_size * matrix_size;
            if(i == (device_count - 1))
                chunkBytes = ((chunk_size + rem_size) * matrix_size) * sizeof(float);
            else
                chunkBytes = (chunk_size * matrix_size) * sizeof(float);
            checkCudaErrors(cudaSetDevice(i));
            checkCudaErrors(cudaMemcpyAsync(&result[start_p], gpu_result[i], chunkBytes, cudaMemcpyDeviceToHost));
        }

        // Wait for all devices before the host reads `result`.
        for(i = 0; i < device_count; i++){
            checkCudaErrors(cudaSetDevice(i));
            cudaDeviceSynchronize();
        }
        cudaError_t error_kernel = cudaGetLastError();
        if(error_kernel != cudaSuccess)
            printf("Error: %s\n", cudaGetErrorString(error_kernel));

        clock_gettime(CLOCK_REALTIME, &end);
        runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
        printf("Running Time: %f\n\n", runtime);
    }

    // Sequential CPU reference check (modes 1 and 3): recompute on the host
    // and count exact matches against the GPU result.
    if(mode == 1 || mode == 3){
        int right_answer = 0;
        float *seqresult = (float *)malloc(matrixBytes);
        for (i = 0; i < matrix_size; i++){
            for (j = 0; j < matrix_size; j++){
                seqresult[i * matrix_size + j] = 0;
                for (k = 0; k < matrix_size; k++)
                    seqresult[i * matrix_size + j] += matrixA[i * matrix_size + k] * matrixB[k * matrix_size + j];
                if(seqresult[i * matrix_size + j] == result[i * matrix_size + j])
                    right_answer += 1;
            }
        }
        if(right_answer == (matrix_size * matrix_size))
            printf("The answer is matched.\n");
        free(seqresult);
    }

    // Release device and host memory (host allocator matches the path used).
    if(mode < 2){
        cudaFree(gpu_matrixA[0]);
        cudaFree(gpu_matrixB[0]);
        cudaFree(gpu_result[0]);
        free(matrixA);
        free(matrixB);
        free(result);
    }else{
        for(i = 0; i < device_count; i++){
            cudaFree(gpu_matrixA[i]);
            cudaFree(gpu_matrixB[i]);
            cudaFree(gpu_result[i]);
        }
        cudaFreeHost(matrixA);
        cudaFreeHost(matrixB);
        cudaFreeHost(result);
    }
    cudaDeviceReset();
    return 0;
}
e971d9482d9802edf7ee8c8d5c0a21398d580ed8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ///////////////////////////////////////////////////////////////////////////// /// \file basis_hproj_bilateral.cuh /// /// \brief /// /// \copyright Copyright (c) 2020 Visual Computing group of Ulm University, /// Germany. See the LICENSE file at the top-level directory of /// this distribution. /// /// \author pedro hermosilla (pedro-1.hermosilla-casajus@uni-ulm.de) ///////////////////////////////////////////////////////////////////////////// #include "defines.hpp" #include "cuda_kernel_utils.cuh" #include "math_helper.cuh" #include "nn_utils.cuh" #include "basis/basis_hproj_bilateral.cuh" #include "basis/basis_utils.cuh" template<int D, int K, int U, int A> __global__ void compute_hproj_bilateral_basis_proj_pt_coords( const unsigned int pNumNeighbors, const mccnn::fpoint<D>* __restrict__ pInPtsGPUPtr, const mccnn::fpoint<D>* __restrict__ pInSamplesGPUPtr, const mccnn::fpoint<D>* __restrict__ pInInvRadiiGPUPtr, const int2* __restrict__ pInNeighborsGPUPtr, const float* __restrict__ pInPDFsGPUPtr, const float* __restrict__ pInXNeighValsGPUPtr, const float* __restrict__ pInBasisGPUPtr, float* __restrict__ pOutProjGPUPtr) { //Shared memory to store the kernel points. extern __shared__ float kernelPts[]; //Create the struct to compute the activation function. mccnn::activation_function_struct<A> acFunc; //Load the kernel point centers. #pragma unroll(2) for(int i = threadIdx.x; i < K*(D+U+1); i+=blockDim.x) kernelPts[i] = pInBasisGPUPtr[i]; __syncthreads(); //Get usefull indices. const unsigned int initThreadIndex = mccnn::compute_global_index_gpu_funct(); const unsigned int totalNumThreads = mccnn::compute_total_threads_gpu_funct(); for(unsigned int curIter = initThreadIndex; curIter < pNumNeighbors; curIter += totalNumThreads) { //Get indices to the point and sample. int2 neighAndSampleIndices = pInNeighborsGPUPtr[curIter]; //Compute the pt difference. 
mccnn::fpoint<D> ptDiff = (pInPtsGPUPtr[neighAndSampleIndices.x] - pInSamplesGPUPtr[neighAndSampleIndices.y])*pInInvRadiiGPUPtr[0]; //Compute the pdf inverse. float weightVal = 1.0f/(pInPDFsGPUPtr[curIter]); //Compute the projection of each basis. for(int i = 0; i < K; ++i){ float sum = 0.0f; #pragma unroll for(int j = 0; j < D; ++j) sum += kernelPts[i*(D+U+1) + j]*ptDiff[j]; #pragma unroll for(int j = 0; j < U; ++j) sum += kernelPts[i*(D+U+1) + D + j]* pInXNeighValsGPUPtr[curIter*U + j]; sum += kernelPts[i*(D+U+1) + D + U]; pOutProjGPUPtr[curIter*K + i] = acFunc.forward(sum)*weightVal; } } } /** * Template to accumulate the point gradients. */ template<int D, int K, int U, bool P> struct accum_pt_grads{ __forceinline__ __device__ void accumulate( const int pOffset, const float* pSharedMem, float* __restrict__ pOutPtGrads, float* __restrict__ pOutSampleGrads, float* __restrict__ pOutPDFGrads, float* __restrict__ pXNeighValGrads){} }; template<int D, int K, int U> struct accum_pt_grads<D, K, U, true>{ __forceinline__ __device__ void accumulate( const int pOffset, const float* __restrict__ pSharedMem, float* __restrict__ pOutPtGrads, float* __restrict__ pOutSampleGrads, float* __restrict__ pOutPDFGrads, float* __restrict__ pXNeighValGrads){ float accumVal = 0.0f; #pragma unroll for(int j = 0; j < K; ++j){ accumVal += pSharedMem[pOffset*blockDim.x + j]; } if(pOffset < D) atomicAdd(&pOutPtGrads[pOffset], accumVal); else if(pOffset < D+U) atomicAdd(&pXNeighValGrads[pOffset - D], accumVal); else if(pOffset < (D*2+U)) atomicAdd(&pOutSampleGrads[pOffset - (D+U)], accumVal); else pOutPDFGrads[0] = accumVal; } }; template<int D, int K, int U, int A, bool P> __global__ void compute_hproj_bilateral_basis_proj_pt_coords_grads( const unsigned int pNumNeighbors, const float* __restrict__ pInPtsGPUPtr, const float* __restrict__ pInSamplesGPUPtr, const float* __restrict__ pInInvRadiiGPUPtr, const int2* __restrict__ pInNeighborsGPUPtr, const float* __restrict__ pInPDFsGPUPtr, const 
float* __restrict__ pInXNeighValsGPUPtr, const float* __restrict__ pInBasisGPUPtr, const float* __restrict__ pInGradsGPUPtr, float* __restrict__ pOutBasisGradsGPUPtr, float* __restrict__ pOutPtsGradsGPUPtr, float* __restrict__ pOutSampleGradsGPUPtr, float* __restrict__ pOutPDFGradsGPUPtr, float* __restrict__ pOutXNeighValsGradsGPUPtr) { //Shared memory to store the kernel points. extern __shared__ float sharedMem[]; //Create the struct to compute the activation function. mccnn::activation_function_struct<A> acFunc; //Create the struct to compute point gradients. accum_pt_grads<D, K, U, P> ptGrads; //Compute usefull indices. int totalExecThreads = pNumNeighbors*K; totalExecThreads += (totalExecThreads%blockDim.x != 0)? blockDim.x-totalExecThreads%blockDim.x:0; int groupId = threadIdx.x/K; int kpIndex = threadIdx.x%K; int groupsXBlock = blockDim.x/K; //Get the pointers to shared memory. float* kernelPts = sharedMem; float* accumGrads = &sharedMem[K*(D+U+1)]; float* sharedPtDiffs = &sharedMem[K*(D+U+1) + blockDim.x*(D+U+1)]; float* accumPtGrads = &sharedMem[K*(D+U+1) + blockDim.x*(D+U+1) + groupsXBlock*(D+U)]; //Load the kernel point centers. #pragma unroll(2) for(int i = threadIdx.x; i < K*(D+U+1); i+=blockDim.x) kernelPts[i] = pInBasisGPUPtr[i]; #pragma unroll for(int i = 0; i < D+U+1; ++i) accumGrads[i*blockDim.x + threadIdx.x] = 0.0f; //Get usefull indices. const int initThreadIndex = mccnn::compute_global_index_gpu_funct(); const int totalNumThreads = mccnn::compute_total_threads_gpu_funct(); for(int curIter = initThreadIndex; curIter < totalExecThreads; curIter += totalNumThreads) { //Get indices to the point and sample. int2 neighAndSampleIndices; int neighIndex = curIter/K; float inGradient = 0.0f; if(neighIndex < pNumNeighbors){ neighAndSampleIndices = pInNeighborsGPUPtr[neighIndex]; //Compute the pt difference. 
if(kpIndex < D){ sharedPtDiffs[groupId*(D+U) + kpIndex] = (pInPtsGPUPtr[neighAndSampleIndices.x*D + kpIndex] - pInSamplesGPUPtr[neighAndSampleIndices.y*D + kpIndex])* pInInvRadiiGPUPtr[kpIndex]; }else if(kpIndex < D+U){ sharedPtDiffs[groupId*(D+U) + kpIndex] = pInXNeighValsGPUPtr[neighIndex*U + kpIndex - D]; } //Get the gradient. inGradient = pInGradsGPUPtr[neighIndex*K + kpIndex]; } __syncthreads(); if(neighIndex < pNumNeighbors){ //Compute the pdf inverse. float invPdf = 1.0f/(pInPDFsGPUPtr[neighIndex]); //Compute the projection of each basis. float sum = 0.0f; #pragma unroll for(int j = 0; j < D+U; ++j) sum += kernelPts[kpIndex*(D+U+1) + j]*sharedPtDiffs[groupId*(D+U) + j]; sum += kernelPts[kpIndex*(D+U+1) + D + U]; float value = acFunc.forward(sum); //Compute the gradient before the projection. float curInGradient = inGradient * acFunc.backward(value) * invPdf; //Compute the gradients //TODO - Add kahan summation, but requires more shared memory. #pragma unroll for(int j = 0; j < D+U; ++j){ accumGrads[threadIdx.x + j*blockDim.x] += sharedPtDiffs[groupId*(D+U) + j]*curInGradient; if (j < D){ accumPtGrads[threadIdx.x + j*blockDim.x] = pInInvRadiiGPUPtr[j]*curInGradient*kernelPts[kpIndex*(D+U+1) + j]; accumPtGrads[threadIdx.x + (D+U+j)*blockDim.x] = -pInInvRadiiGPUPtr[j]*curInGradient*kernelPts[kpIndex*(D+U+1) + j]; }else{ accumPtGrads[threadIdx.x + j*blockDim.x] = curInGradient* kernelPts[kpIndex*(D+U+1) + j]; } } accumGrads[threadIdx.x + (D+U)*blockDim.x] += curInGradient;//Bias accumPtGrads[threadIdx.x + (D*2+U)*blockDim.x] = -value*invPdf*invPdf*inGradient;//PDF } __syncthreads(); if(neighIndex < pNumNeighbors && kpIndex < (D*2+U+1)){ ptGrads.accumulate(kpIndex, &accumPtGrads[groupId*K], &pOutPtsGradsGPUPtr[neighAndSampleIndices.x*D], &pOutSampleGradsGPUPtr[neighAndSampleIndices.y*D], &pOutPDFGradsGPUPtr[neighIndex], &pOutXNeighValsGradsGPUPtr[neighIndex*U]); } __syncthreads(); } //Save the gradient into memory. 
for(int i = threadIdx.x; i < K*(D+U+1); i+=blockDim.x){ int dimension = i/K; int kpoint = i%K; float accumVal = 0.0f; #pragma unroll(2) for(int j = 0; j < groupsXBlock; ++j){ accumVal += accumGrads[dimension*blockDim.x + j*K + kpoint]; } atomicAdd(&pOutBasisGradsGPUPtr[kpoint*(D+U+1) + dimension], accumVal); /*if(initThreadIndex < 64 && kpoint < 2){ printf("%f %d %d | ", accumVal, kpoint, dimension); }*/ } } /////////////////// CLASS DEFINITION namespace mccnn{ template<int D, int K, int U> HProjBilateralBasis<D, K, U>::HProjBilateralBasis( HProjBilateralBasis::ActivationFunction pAcFunc) :BasisInterface<D, K, U>(), acFunc_(pAcFunc) { } template<int D, int K, int U> HProjBilateralBasis<D, K, U>::~HProjBilateralBasis(void) { } template<int D, int K, int U> void HProjBilateralBasis<D, K, U>::compute_basis_proj_pt_coords( std::unique_ptr<IGPUDevice>& pDevice, const unsigned int pNumNeighbors, const float* pInPtsGPUPtr, const float* pInSamplesGPUPtr, const float* pInInvRadiiGPUPtr, const int* pInNeighborsGPUPtr, const float* pInPDFsGPUPtr, const float* pInXNeighValsGPUPtr, const float* pInBasisGPUPtr, float* pOutProjGPUPtr) { //Get the device properties. const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); //Get information of the Device. unsigned int numMP = gpuProps.numMPs_; //Get the cuda stream. auto cudaStream = pDevice->getCUDAStream(); //Define the block size. unsigned int blockSize = 64; //Get the current function pointer. 
const void* cFunct = nullptr; if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::RELU){ cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 0>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::LRELU){ cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 1>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::ELU){ cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 2>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::EXP){ cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 3>; } #ifdef DEBUG_INFO hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, cudaStream); #endif //Calculate the shared memory needed. unsigned int sharedMemSize = (K*(D+U+1)*sizeof(float)); //Compute the number of blocks unsigned int numBlocks = pDevice->get_max_active_block_x_sm( blockSize, cFunct, sharedMemSize); pDevice->check_error(__FILE__, __LINE__); //Calculate the total number of blocks to execute. unsigned int execBlocks = pNumNeighbors/blockSize; execBlocks += (pNumNeighbors%blockSize != 0)?1:0; unsigned int totalNumBlocks = numMP*numBlocks; totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; //Execute the kernel extensions. 
if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::RELU){ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 0>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, (const fpoint<D>*)pInPtsGPUPtr, (const fpoint<D>*)pInSamplesGPUPtr, (const fpoint<D>*)pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pOutProjGPUPtr); }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::LRELU){ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 1>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, (const fpoint<D>*)pInPtsGPUPtr, (const fpoint<D>*)pInSamplesGPUPtr, (const fpoint<D>*)pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pOutProjGPUPtr); }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::ELU){ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 2>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, (const fpoint<D>*)pInPtsGPUPtr, (const fpoint<D>*)pInSamplesGPUPtr, (const fpoint<D>*)pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pOutProjGPUPtr); }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::EXP){ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 3>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, (const fpoint<D>*)pInPtsGPUPtr, (const fpoint<D>*)pInSamplesGPUPtr, (const fpoint<D>*)pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pOutProjGPUPtr); } pDevice->check_error(__FILE__, __LINE__); #ifdef DEBUG_INFO hipEventRecord(stop, cudaStream); hipEventSynchronize(stop); float milliseconds = 0.0f; hipEventElapsedTime(&milliseconds, start, stop); struct 
hipFuncAttributes funcAttrib; hipFuncGetAttributes(&funcAttrib, cFunct); float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; fprintf(stderr, "### HPROJ BILATERAL BASIS PROJ ###\n"); fprintf(stderr, "Num basis: %d\n", K); fprintf(stderr, "Local memory: %d\n", (int)funcAttrib.localSizeBytes); fprintf(stderr, "Constant memory: %d\n", (int)funcAttrib.constSizeBytes); fprintf(stderr, "Num reg kernel: %d\n", funcAttrib.numRegs); fprintf(stderr, "Shared memory kernel: %d\n", sharedMemSize); fprintf(stderr, "Num neighbors: %d\n", pNumNeighbors); fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); fprintf(stderr, "Execution time: %f\n", milliseconds); fprintf(stderr, "\n"); #endif } template<int D, int K, int U> void HProjBilateralBasis<D, K, U>::compute_grads_basis_proj_pt_coords( std::unique_ptr<IGPUDevice>& pDevice, const unsigned int pNumNeighbors, const float* pInPtsGPUPtr, const float* pInSamplesGPUPtr, const float* pInInvRadiiGPUPtr, const int* pInNeighborsGPUPtr, const float* pInPDFsGPUPtr, const float* pInXNeighValsGPUPtr, const float* pInBasisGPUPtr, const float* pInGradsGPUPtr, float* pOutBasisGradsGPUPtr, float* pOutPtsGradsGPUPtr, float* pOutSampleGradsGPUPtr, float* pOutPDFGradsGPUPtr, float* pOutXNeighGradsGPUPtr) { //Check if the gradietns of the points should be computed. bool pointGrads = (pOutPtsGradsGPUPtr != nullptr) && (pOutSampleGradsGPUPtr != nullptr) && (pOutPDFGradsGPUPtr != nullptr); //Get the device properties. const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); //Get information of the Device. unsigned int numMP = gpuProps.numMPs_; //Get the cuda stream. auto cudaStream = pDevice->getCUDAStream(); //Define the block size. unsigned int blockSize = 64; //Get the current function pointer. 
const void* cFunct = nullptr; if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::RELU){ if(pointGrads) cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 0, true>; else cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 0, false>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::LRELU){ if(pointGrads) cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 1, true>; else cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 1, false>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::ELU){ if(pointGrads) cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 2, true>; else cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 2, false>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::EXP){ if(pointGrads) cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 3, true>; else cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 3, false>; } #ifdef DEBUG_INFO hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, cudaStream); #endif //Calculate the shared memory needed. unsigned int sharedMemSize = ((K + blockSize)*(D+U+1) + (blockSize/K)*(D+U) + blockSize*(D*2+ U + 1))*sizeof(float); //Compute the number of blocks unsigned int numBlocks = pDevice->get_max_active_block_x_sm( blockSize, cFunct, sharedMemSize); pDevice->check_error(__FILE__, __LINE__); //Calculate the total number of blocks to execute. unsigned int execBlocks = (pNumNeighbors*K)/blockSize; execBlocks += ((pNumNeighbors*K)%blockSize != 0)?1:0; unsigned int totalNumBlocks = numMP*numBlocks; totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; //Execute the kernel extensions. 
if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::RELU){ if(pointGrads){ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 0, true>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); }else{ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 0, false>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); } }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::LRELU){ if(pointGrads){ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 1, true>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); }else{ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 1, false>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); } }else if(acFunc_ == HProjBilateralBasis<D, K, 
U>::ActivationFunction::ELU){ if(pointGrads){ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 2, true>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); }else{ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 2, false>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); } }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::EXP){ if(pointGrads){ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 3, true>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); }else{ hipLaunchKernelGGL(( compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 3, false>) , dim3(totalNumBlocks), dim3(blockSize), sharedMemSize, cudaStream, pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); } } pDevice->check_error(__FILE__, __LINE__); #ifdef DEBUG_INFO hipEventRecord(stop, cudaStream); 
hipEventSynchronize(stop); float milliseconds = 0.0f; hipEventElapsedTime(&milliseconds, start, stop); struct hipFuncAttributes funcAttrib; hipFuncGetAttributes(&funcAttrib, cFunct); float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; fprintf(stderr, "### HPROJ BILATERAL BASIS PROJ GRADS ###\n"); fprintf(stderr, "Num basis: %d\n", K); fprintf(stderr, "Local memory: %d\n", (int)funcAttrib.localSizeBytes); fprintf(stderr, "Constant memory: %d\n", (int)funcAttrib.constSizeBytes); fprintf(stderr, "Num reg kernel: %d\n", funcAttrib.numRegs); fprintf(stderr, "Shared memory kernel: %d\n", sharedMemSize); fprintf(stderr, "Num neighbors: %d\n", pNumNeighbors); fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); fprintf(stderr, "Execution time: %f\n", milliseconds); fprintf(stderr, "\n"); #endif } } //DECLARE THE VALID INSTANCES OF THE TEMPLATE CLASS #define HPROJ_BILATERAL_BASIS_CLASS_DECL(D, K, U) \ template class mccnn::HProjBilateralBasis<D, K, U>; DECLARE_TEMPLATE_DIMS_BASIS(HPROJ_BILATERAL_BASIS_CLASS_DECL)
e971d9482d9802edf7ee8c8d5c0a21398d580ed8.cu
///////////////////////////////////////////////////////////////////////////// /// \file basis_hproj_bilateral.cuh /// /// \brief /// /// \copyright Copyright (c) 2020 Visual Computing group of Ulm University, /// Germany. See the LICENSE file at the top-level directory of /// this distribution. /// /// \author pedro hermosilla (pedro-1.hermosilla-casajus@uni-ulm.de) ///////////////////////////////////////////////////////////////////////////// #include "defines.hpp" #include "cuda_kernel_utils.cuh" #include "math_helper.cuh" #include "nn_utils.cuh" #include "basis/basis_hproj_bilateral.cuh" #include "basis/basis_utils.cuh" template<int D, int K, int U, int A> __global__ void compute_hproj_bilateral_basis_proj_pt_coords( const unsigned int pNumNeighbors, const mccnn::fpoint<D>* __restrict__ pInPtsGPUPtr, const mccnn::fpoint<D>* __restrict__ pInSamplesGPUPtr, const mccnn::fpoint<D>* __restrict__ pInInvRadiiGPUPtr, const int2* __restrict__ pInNeighborsGPUPtr, const float* __restrict__ pInPDFsGPUPtr, const float* __restrict__ pInXNeighValsGPUPtr, const float* __restrict__ pInBasisGPUPtr, float* __restrict__ pOutProjGPUPtr) { //Shared memory to store the kernel points. extern __shared__ float kernelPts[]; //Create the struct to compute the activation function. mccnn::activation_function_struct<A> acFunc; //Load the kernel point centers. #pragma unroll(2) for(int i = threadIdx.x; i < K*(D+U+1); i+=blockDim.x) kernelPts[i] = pInBasisGPUPtr[i]; __syncthreads(); //Get usefull indices. const unsigned int initThreadIndex = mccnn::compute_global_index_gpu_funct(); const unsigned int totalNumThreads = mccnn::compute_total_threads_gpu_funct(); for(unsigned int curIter = initThreadIndex; curIter < pNumNeighbors; curIter += totalNumThreads) { //Get indices to the point and sample. int2 neighAndSampleIndices = pInNeighborsGPUPtr[curIter]; //Compute the pt difference. 
mccnn::fpoint<D> ptDiff = (pInPtsGPUPtr[neighAndSampleIndices.x] - pInSamplesGPUPtr[neighAndSampleIndices.y])*pInInvRadiiGPUPtr[0]; //Compute the pdf inverse. float weightVal = 1.0f/(pInPDFsGPUPtr[curIter]); //Compute the projection of each basis. for(int i = 0; i < K; ++i){ float sum = 0.0f; #pragma unroll for(int j = 0; j < D; ++j) sum += kernelPts[i*(D+U+1) + j]*ptDiff[j]; #pragma unroll for(int j = 0; j < U; ++j) sum += kernelPts[i*(D+U+1) + D + j]* pInXNeighValsGPUPtr[curIter*U + j]; sum += kernelPts[i*(D+U+1) + D + U]; pOutProjGPUPtr[curIter*K + i] = acFunc.forward(sum)*weightVal; } } } /** * Template to accumulate the point gradients. */ template<int D, int K, int U, bool P> struct accum_pt_grads{ __forceinline__ __device__ void accumulate( const int pOffset, const float* pSharedMem, float* __restrict__ pOutPtGrads, float* __restrict__ pOutSampleGrads, float* __restrict__ pOutPDFGrads, float* __restrict__ pXNeighValGrads){} }; template<int D, int K, int U> struct accum_pt_grads<D, K, U, true>{ __forceinline__ __device__ void accumulate( const int pOffset, const float* __restrict__ pSharedMem, float* __restrict__ pOutPtGrads, float* __restrict__ pOutSampleGrads, float* __restrict__ pOutPDFGrads, float* __restrict__ pXNeighValGrads){ float accumVal = 0.0f; #pragma unroll for(int j = 0; j < K; ++j){ accumVal += pSharedMem[pOffset*blockDim.x + j]; } if(pOffset < D) atomicAdd(&pOutPtGrads[pOffset], accumVal); else if(pOffset < D+U) atomicAdd(&pXNeighValGrads[pOffset - D], accumVal); else if(pOffset < (D*2+U)) atomicAdd(&pOutSampleGrads[pOffset - (D+U)], accumVal); else pOutPDFGrads[0] = accumVal; } }; template<int D, int K, int U, int A, bool P> __global__ void compute_hproj_bilateral_basis_proj_pt_coords_grads( const unsigned int pNumNeighbors, const float* __restrict__ pInPtsGPUPtr, const float* __restrict__ pInSamplesGPUPtr, const float* __restrict__ pInInvRadiiGPUPtr, const int2* __restrict__ pInNeighborsGPUPtr, const float* __restrict__ pInPDFsGPUPtr, const 
float* __restrict__ pInXNeighValsGPUPtr, const float* __restrict__ pInBasisGPUPtr, const float* __restrict__ pInGradsGPUPtr, float* __restrict__ pOutBasisGradsGPUPtr, float* __restrict__ pOutPtsGradsGPUPtr, float* __restrict__ pOutSampleGradsGPUPtr, float* __restrict__ pOutPDFGradsGPUPtr, float* __restrict__ pOutXNeighValsGradsGPUPtr) { //Shared memory to store the kernel points. extern __shared__ float sharedMem[]; //Create the struct to compute the activation function. mccnn::activation_function_struct<A> acFunc; //Create the struct to compute point gradients. accum_pt_grads<D, K, U, P> ptGrads; //Compute usefull indices. int totalExecThreads = pNumNeighbors*K; totalExecThreads += (totalExecThreads%blockDim.x != 0)? blockDim.x-totalExecThreads%blockDim.x:0; int groupId = threadIdx.x/K; int kpIndex = threadIdx.x%K; int groupsXBlock = blockDim.x/K; //Get the pointers to shared memory. float* kernelPts = sharedMem; float* accumGrads = &sharedMem[K*(D+U+1)]; float* sharedPtDiffs = &sharedMem[K*(D+U+1) + blockDim.x*(D+U+1)]; float* accumPtGrads = &sharedMem[K*(D+U+1) + blockDim.x*(D+U+1) + groupsXBlock*(D+U)]; //Load the kernel point centers. #pragma unroll(2) for(int i = threadIdx.x; i < K*(D+U+1); i+=blockDim.x) kernelPts[i] = pInBasisGPUPtr[i]; #pragma unroll for(int i = 0; i < D+U+1; ++i) accumGrads[i*blockDim.x + threadIdx.x] = 0.0f; //Get usefull indices. const int initThreadIndex = mccnn::compute_global_index_gpu_funct(); const int totalNumThreads = mccnn::compute_total_threads_gpu_funct(); for(int curIter = initThreadIndex; curIter < totalExecThreads; curIter += totalNumThreads) { //Get indices to the point and sample. int2 neighAndSampleIndices; int neighIndex = curIter/K; float inGradient = 0.0f; if(neighIndex < pNumNeighbors){ neighAndSampleIndices = pInNeighborsGPUPtr[neighIndex]; //Compute the pt difference. 
if(kpIndex < D){ sharedPtDiffs[groupId*(D+U) + kpIndex] = (pInPtsGPUPtr[neighAndSampleIndices.x*D + kpIndex] - pInSamplesGPUPtr[neighAndSampleIndices.y*D + kpIndex])* pInInvRadiiGPUPtr[kpIndex]; }else if(kpIndex < D+U){ sharedPtDiffs[groupId*(D+U) + kpIndex] = pInXNeighValsGPUPtr[neighIndex*U + kpIndex - D]; } //Get the gradient. inGradient = pInGradsGPUPtr[neighIndex*K + kpIndex]; } __syncthreads(); if(neighIndex < pNumNeighbors){ //Compute the pdf inverse. float invPdf = 1.0f/(pInPDFsGPUPtr[neighIndex]); //Compute the projection of each basis. float sum = 0.0f; #pragma unroll for(int j = 0; j < D+U; ++j) sum += kernelPts[kpIndex*(D+U+1) + j]*sharedPtDiffs[groupId*(D+U) + j]; sum += kernelPts[kpIndex*(D+U+1) + D + U]; float value = acFunc.forward(sum); //Compute the gradient before the projection. float curInGradient = inGradient * acFunc.backward(value) * invPdf; //Compute the gradients //TODO - Add kahan summation, but requires more shared memory. #pragma unroll for(int j = 0; j < D+U; ++j){ accumGrads[threadIdx.x + j*blockDim.x] += sharedPtDiffs[groupId*(D+U) + j]*curInGradient; if (j < D){ accumPtGrads[threadIdx.x + j*blockDim.x] = pInInvRadiiGPUPtr[j]*curInGradient*kernelPts[kpIndex*(D+U+1) + j]; accumPtGrads[threadIdx.x + (D+U+j)*blockDim.x] = -pInInvRadiiGPUPtr[j]*curInGradient*kernelPts[kpIndex*(D+U+1) + j]; }else{ accumPtGrads[threadIdx.x + j*blockDim.x] = curInGradient* kernelPts[kpIndex*(D+U+1) + j]; } } accumGrads[threadIdx.x + (D+U)*blockDim.x] += curInGradient;//Bias accumPtGrads[threadIdx.x + (D*2+U)*blockDim.x] = -value*invPdf*invPdf*inGradient;//PDF } __syncthreads(); if(neighIndex < pNumNeighbors && kpIndex < (D*2+U+1)){ ptGrads.accumulate(kpIndex, &accumPtGrads[groupId*K], &pOutPtsGradsGPUPtr[neighAndSampleIndices.x*D], &pOutSampleGradsGPUPtr[neighAndSampleIndices.y*D], &pOutPDFGradsGPUPtr[neighIndex], &pOutXNeighValsGradsGPUPtr[neighIndex*U]); } __syncthreads(); } //Save the gradient into memory. 
for(int i = threadIdx.x; i < K*(D+U+1); i+=blockDim.x){ int dimension = i/K; int kpoint = i%K; float accumVal = 0.0f; #pragma unroll(2) for(int j = 0; j < groupsXBlock; ++j){ accumVal += accumGrads[dimension*blockDim.x + j*K + kpoint]; } atomicAdd(&pOutBasisGradsGPUPtr[kpoint*(D+U+1) + dimension], accumVal); /*if(initThreadIndex < 64 && kpoint < 2){ printf("%f %d %d | ", accumVal, kpoint, dimension); }*/ } } /////////////////// CLASS DEFINITION namespace mccnn{ template<int D, int K, int U> HProjBilateralBasis<D, K, U>::HProjBilateralBasis( HProjBilateralBasis::ActivationFunction pAcFunc) :BasisInterface<D, K, U>(), acFunc_(pAcFunc) { } template<int D, int K, int U> HProjBilateralBasis<D, K, U>::~HProjBilateralBasis(void) { } template<int D, int K, int U> void HProjBilateralBasis<D, K, U>::compute_basis_proj_pt_coords( std::unique_ptr<IGPUDevice>& pDevice, const unsigned int pNumNeighbors, const float* pInPtsGPUPtr, const float* pInSamplesGPUPtr, const float* pInInvRadiiGPUPtr, const int* pInNeighborsGPUPtr, const float* pInPDFsGPUPtr, const float* pInXNeighValsGPUPtr, const float* pInBasisGPUPtr, float* pOutProjGPUPtr) { //Get the device properties. const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); //Get information of the Device. unsigned int numMP = gpuProps.numMPs_; //Get the cuda stream. auto cudaStream = pDevice->getCUDAStream(); //Define the block size. unsigned int blockSize = 64; //Get the current function pointer. 
const void* cFunct = nullptr; if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::RELU){ cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 0>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::LRELU){ cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 1>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::ELU){ cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 2>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::EXP){ cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 3>; } #ifdef DEBUG_INFO cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, cudaStream); #endif //Calculate the shared memory needed. unsigned int sharedMemSize = (K*(D+U+1)*sizeof(float)); //Compute the number of blocks unsigned int numBlocks = pDevice->get_max_active_block_x_sm( blockSize, cFunct, sharedMemSize); pDevice->check_error(__FILE__, __LINE__); //Calculate the total number of blocks to execute. unsigned int execBlocks = pNumNeighbors/blockSize; execBlocks += (pNumNeighbors%blockSize != 0)?1:0; unsigned int totalNumBlocks = numMP*numBlocks; totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; //Execute the kernel extensions. 
if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::RELU){ compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 0> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, (const fpoint<D>*)pInPtsGPUPtr, (const fpoint<D>*)pInSamplesGPUPtr, (const fpoint<D>*)pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pOutProjGPUPtr); }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::LRELU){ compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 1> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, (const fpoint<D>*)pInPtsGPUPtr, (const fpoint<D>*)pInSamplesGPUPtr, (const fpoint<D>*)pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pOutProjGPUPtr); }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::ELU){ compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 2> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, (const fpoint<D>*)pInPtsGPUPtr, (const fpoint<D>*)pInSamplesGPUPtr, (const fpoint<D>*)pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pOutProjGPUPtr); }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::EXP){ compute_hproj_bilateral_basis_proj_pt_coords<D, K, U, 3> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, (const fpoint<D>*)pInPtsGPUPtr, (const fpoint<D>*)pInSamplesGPUPtr, (const fpoint<D>*)pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pOutProjGPUPtr); } pDevice->check_error(__FILE__, __LINE__); #ifdef DEBUG_INFO cudaEventRecord(stop, cudaStream); cudaEventSynchronize(stop); float milliseconds = 0.0f; cudaEventElapsedTime(&milliseconds, start, stop); struct cudaFuncAttributes funcAttrib; cudaFuncGetAttributes(&funcAttrib, cFunct); float gpuOccupancy = 
(float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; fprintf(stderr, "### HPROJ BILATERAL BASIS PROJ ###\n"); fprintf(stderr, "Num basis: %d\n", K); fprintf(stderr, "Local memory: %d\n", (int)funcAttrib.localSizeBytes); fprintf(stderr, "Constant memory: %d\n", (int)funcAttrib.constSizeBytes); fprintf(stderr, "Num reg kernel: %d\n", funcAttrib.numRegs); fprintf(stderr, "Shared memory kernel: %d\n", sharedMemSize); fprintf(stderr, "Num neighbors: %d\n", pNumNeighbors); fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); fprintf(stderr, "Execution time: %f\n", milliseconds); fprintf(stderr, "\n"); #endif } template<int D, int K, int U> void HProjBilateralBasis<D, K, U>::compute_grads_basis_proj_pt_coords( std::unique_ptr<IGPUDevice>& pDevice, const unsigned int pNumNeighbors, const float* pInPtsGPUPtr, const float* pInSamplesGPUPtr, const float* pInInvRadiiGPUPtr, const int* pInNeighborsGPUPtr, const float* pInPDFsGPUPtr, const float* pInXNeighValsGPUPtr, const float* pInBasisGPUPtr, const float* pInGradsGPUPtr, float* pOutBasisGradsGPUPtr, float* pOutPtsGradsGPUPtr, float* pOutSampleGradsGPUPtr, float* pOutPDFGradsGPUPtr, float* pOutXNeighGradsGPUPtr) { //Check if the gradietns of the points should be computed. bool pointGrads = (pOutPtsGradsGPUPtr != nullptr) && (pOutSampleGradsGPUPtr != nullptr) && (pOutPDFGradsGPUPtr != nullptr); //Get the device properties. const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); //Get information of the Device. unsigned int numMP = gpuProps.numMPs_; //Get the cuda stream. auto cudaStream = pDevice->getCUDAStream(); //Define the block size. unsigned int blockSize = 64; //Get the current function pointer. 
const void* cFunct = nullptr; if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::RELU){ if(pointGrads) cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 0, true>; else cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 0, false>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::LRELU){ if(pointGrads) cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 1, true>; else cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 1, false>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::ELU){ if(pointGrads) cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 2, true>; else cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 2, false>; }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::EXP){ if(pointGrads) cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 3, true>; else cFunct = (const void*)compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 3, false>; } #ifdef DEBUG_INFO cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, cudaStream); #endif //Calculate the shared memory needed. unsigned int sharedMemSize = ((K + blockSize)*(D+U+1) + (blockSize/K)*(D+U) + blockSize*(D*2+ U + 1))*sizeof(float); //Compute the number of blocks unsigned int numBlocks = pDevice->get_max_active_block_x_sm( blockSize, cFunct, sharedMemSize); pDevice->check_error(__FILE__, __LINE__); //Calculate the total number of blocks to execute. unsigned int execBlocks = (pNumNeighbors*K)/blockSize; execBlocks += ((pNumNeighbors*K)%blockSize != 0)?1:0; unsigned int totalNumBlocks = numMP*numBlocks; totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; //Execute the kernel extensions. 
if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::RELU){ if(pointGrads){ compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 0, true> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); }else{ compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 0, false> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); } }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::LRELU){ if(pointGrads){ compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 1, true> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); }else{ compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 1, false> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); } }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::ELU){ if(pointGrads){ compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 2, true> 
<<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); }else{ compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 2, false> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); } }else if(acFunc_ == HProjBilateralBasis<D, K, U>::ActivationFunction::EXP){ if(pointGrads){ compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 3, true> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); }else{ compute_hproj_bilateral_basis_proj_pt_coords_grads<D, K, U, 3, false> <<<totalNumBlocks, blockSize, sharedMemSize, cudaStream>>>( pNumNeighbors, pInPtsGPUPtr, pInSamplesGPUPtr, pInInvRadiiGPUPtr, (const int2*)pInNeighborsGPUPtr, pInPDFsGPUPtr, pInXNeighValsGPUPtr, pInBasisGPUPtr, pInGradsGPUPtr, pOutBasisGradsGPUPtr, pOutPtsGradsGPUPtr, pOutSampleGradsGPUPtr, pOutPDFGradsGPUPtr, pOutXNeighGradsGPUPtr); } } pDevice->check_error(__FILE__, __LINE__); #ifdef DEBUG_INFO cudaEventRecord(stop, cudaStream); cudaEventSynchronize(stop); float milliseconds = 0.0f; cudaEventElapsedTime(&milliseconds, start, stop); struct cudaFuncAttributes funcAttrib; cudaFuncGetAttributes(&funcAttrib, cFunct); float gpuOccupancy = 
(float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; fprintf(stderr, "### HPROJ BILATERAL BASIS PROJ GRADS ###\n"); fprintf(stderr, "Num basis: %d\n", K); fprintf(stderr, "Local memory: %d\n", (int)funcAttrib.localSizeBytes); fprintf(stderr, "Constant memory: %d\n", (int)funcAttrib.constSizeBytes); fprintf(stderr, "Num reg kernel: %d\n", funcAttrib.numRegs); fprintf(stderr, "Shared memory kernel: %d\n", sharedMemSize); fprintf(stderr, "Num neighbors: %d\n", pNumNeighbors); fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); fprintf(stderr, "Execution time: %f\n", milliseconds); fprintf(stderr, "\n"); #endif } } //DECLARE THE VALID INSTANCES OF THE TEMPLATE CLASS #define HPROJ_BILATERAL_BASIS_CLASS_DECL(D, K, U) \ template class mccnn::HProjBilateralBasis<D, K, U>; DECLARE_TEMPLATE_DIMS_BASIS(HPROJ_BILATERAL_BASIS_CLASS_DECL)
1a99832d53eed82cb04f10421374869b01dd26e9.hip
// !!! This is a file automatically generated by hipify!!! #include "encoder.h" using namespace std; namespace GPU { Encoder::Encoder(const Weights& model) : embeddings_(model.encEmbeddings_), forwardRnn_(model.encForwardGRU_), backwardRnn_(model.encBackwardGRU_) {} size_t GetMaxLength(const Sentences& source, size_t tab) { size_t maxLength = source.at(0)->GetWords(tab).size(); for (size_t i = 0; i < source.size(); ++i) { const Sentence &sentence = *source.at(i); maxLength = ::max(maxLength, sentence.GetWords(tab).size()); } return maxLength; } std::vector<std::vector<size_t>> GetBatchInput(const Sentences& source, size_t tab, size_t maxLen) { std::vector<std::vector<size_t>> matrix(maxLen, std::vector<size_t>(source.size(), 0)); for (size_t j = 0; j < source.size(); ++j) { for (size_t i = 0; i < source.at(j)->GetWords(tab).size(); ++i) { matrix[i][j] = source.at(j)->GetWords(tab)[i]; } } return matrix; } void Encoder::GetContext(const Sentences& source, size_t tab, mblas::Matrix& Context, DeviceVector<int>& dMapping) { size_t maxSentenceLength = GetMaxLength(source, tab); thrust::host_vector<int> hMapping(maxSentenceLength * source.size(), 0); for (size_t i = 0; i < source.size(); ++i) { for (size_t j = 0; j < source.at(i)->GetWords(tab).size(); ++j) { hMapping[i * maxSentenceLength + j] = 1; } } dMapping = hMapping; Context.Resize(maxSentenceLength * source.size(), forwardRnn_.GetStateLength() + backwardRnn_.GetStateLength()); auto input = GetBatchInput(source, tab, maxSentenceLength); for (size_t i = 0; i < input.size(); ++i) { if (i >= embeddedWords_.size()) { embeddedWords_.emplace_back(); } embeddings_.Lookup(embeddedWords_[i], input[i]); } forwardRnn_.GetContext(embeddedWords_.cbegin(), embeddedWords_.cbegin() + maxSentenceLength, Context, source.size(), false); backwardRnn_.GetContext(embeddedWords_.crend() - maxSentenceLength, embeddedWords_.crend() , Context, source.size(), true, &dMapping); } }
1a99832d53eed82cb04f10421374869b01dd26e9.cu
#include "encoder.h" using namespace std; namespace GPU { Encoder::Encoder(const Weights& model) : embeddings_(model.encEmbeddings_), forwardRnn_(model.encForwardGRU_), backwardRnn_(model.encBackwardGRU_) {} size_t GetMaxLength(const Sentences& source, size_t tab) { size_t maxLength = source.at(0)->GetWords(tab).size(); for (size_t i = 0; i < source.size(); ++i) { const Sentence &sentence = *source.at(i); maxLength = std::max(maxLength, sentence.GetWords(tab).size()); } return maxLength; } std::vector<std::vector<size_t>> GetBatchInput(const Sentences& source, size_t tab, size_t maxLen) { std::vector<std::vector<size_t>> matrix(maxLen, std::vector<size_t>(source.size(), 0)); for (size_t j = 0; j < source.size(); ++j) { for (size_t i = 0; i < source.at(j)->GetWords(tab).size(); ++i) { matrix[i][j] = source.at(j)->GetWords(tab)[i]; } } return matrix; } void Encoder::GetContext(const Sentences& source, size_t tab, mblas::Matrix& Context, DeviceVector<int>& dMapping) { size_t maxSentenceLength = GetMaxLength(source, tab); thrust::host_vector<int> hMapping(maxSentenceLength * source.size(), 0); for (size_t i = 0; i < source.size(); ++i) { for (size_t j = 0; j < source.at(i)->GetWords(tab).size(); ++j) { hMapping[i * maxSentenceLength + j] = 1; } } dMapping = hMapping; Context.Resize(maxSentenceLength * source.size(), forwardRnn_.GetStateLength() + backwardRnn_.GetStateLength()); auto input = GetBatchInput(source, tab, maxSentenceLength); for (size_t i = 0; i < input.size(); ++i) { if (i >= embeddedWords_.size()) { embeddedWords_.emplace_back(); } embeddings_.Lookup(embeddedWords_[i], input[i]); } forwardRnn_.GetContext(embeddedWords_.cbegin(), embeddedWords_.cbegin() + maxSentenceLength, Context, source.size(), false); backwardRnn_.GetContext(embeddedWords_.crend() - maxSentenceLength, embeddedWords_.crend() , Context, source.size(), true, &dMapping); } }
09f7036d5f7eea7e8f26a1dabbfdb20ae9999490.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** @file tsp.c * @brief Traveling Salesman Problem functions. * @author Marcelo Pinto (xmrcl0@gmail.com) * @date 09/12/2017 * @updated 09/21/2017 */ #include "tsp.h" #include "print.h" #include "utils.h" void distance_matrix (float ***coord, float ***distance, int num_city) { int i, j, nrows, ncols; ncols = num_city; nrows = num_city; *distance = (float **) malloc (nrows * sizeof (float *)); for (i = 0; i < nrows; i++) (*distance)[i] = (float *) malloc (ncols * sizeof (float)); for (i = 0; i < num_city; i++) for (j = 0; j < num_city; j++) (*distance)[i][j] = sqrt (pow ((*coord)[i][0] - (*coord)[j][0], 2) + pow ((*coord)[i][1] - (*coord)[j][1], 2)); } void distance_vector (float ***coord, float **distance, int num_city) { int i, j, nrows, ncols; ncols = num_city; nrows = num_city; *distance = (float *) malloc (num_city * num_city * sizeof (float)); for (i = 0; i < num_city; i++) { for (j = 0; j < num_city; j++) { (*distance)[i + j * num_city] = sqrt (pow ((*coord)[i][0] - (*coord)[j][0], 2) + pow ((*coord)[i][1] - (*coord)[j][1], 2)); } } } __global__ void initRNG(hiprandState_t *const rngStates, const unsigned int seed) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Initialise the RNG hiprand_init(seed, tid, 0, &rngStates[tid]); } __device__ void create_path (int num_city, int *coord, hiprandState_t localState) { randperm (num_city, coord, localState); } __device__ float measure_path (float *distance, int num_city, int *path) { int i; float l = 0; for (i = 0; i < num_city - 1; i++) { int j = path[i]; int k = path[i + 1]; l = l + distance[j + num_city * k]; } l+= distance[path[0] + num_city * path[num_city - 1]]; return l; } __global__ void kernel (float *const mindists, int *const minpaths, float *const distance, hiprandState_t *const rngStates, const int n_cities, const int n_iter) { // Determine thread ID unsigned int bid = blockIdx.x; 
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ltid = threadIdx.x; // Sort out shared memory extern __shared__ float sdata[]; float *threadsMinDists = sdata; int *pathMatrix = (int *) &threadsMinDists[blockDim.x]; int *pathBanks[] = {&pathMatrix[2 * ltid * n_cities], &pathMatrix[n_cities * (2 * ltid + 1)]}; // Sort out local(ie this thread's) variables float *curThreadMinDist = &threadsMinDists[ltid]; int minPathBank = 0; hiprandState_t localState = rngStates[tid]; //Run everything at least once to initialize a sane minimum path create_path (n_cities, pathBanks[1 - minPathBank], localState); *curThreadMinDist = measure_path (distance, n_cities, pathBanks[1 - minPathBank]); minPathBank = 1 - minPathBank; float curThreadCptDist = 0; for (int i = 1; i < n_iter; i++) { create_path (n_cities, pathBanks[1 - minPathBank], localState); curThreadCptDist = measure_path (distance, n_cities, pathBanks[1- minPathBank]); if (curThreadCptDist < *curThreadMinDist) { *curThreadMinDist = curThreadCptDist; // Well, I actually do care minPathBank = 1 - minPathBank; } } unsigned int minDistTid = reduce_dists(threadsMinDists); if (ltid == minDistTid) { mindists[bid] = threadsMinDists[0]; memcpy(&minpaths[bid * n_cities], pathBanks[minPathBank], sizeof(int) * n_cities); } } __device__ unsigned int reduce_dists(float *const threadsMinDists) { unsigned int ltid = threadIdx.x; __syncthreads(); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) { if (ltid < s) { if (threadsMinDists[ltid + s] < threadsMinDists[ltid]) { threadsMinDists[ltid] = threadsMinDists[ltid + s]; if (s == blockDim.x /2) { threadsMinDists[ltid + s] = ltid + s; } else { threadsMinDists[ltid + s] = threadsMinDists[ltid + s + (s << 1)]; } } else { if (s == blockDim.x /2) { threadsMinDists[ltid + s] = ltid; } else { threadsMinDists[ltid + s] = threadsMinDists[ltid + (s << 1)]; } } } __syncthreads(); } return threadsMinDists[1]; } int read_file (char *file, float 
***array) { int i, j, nrows = 0, ncols = 2; char c; char *line = NULL; size_t len=0; FILE *fp; fp = fopen (file, "r"); if (fp == NULL) return 0; while ((getline(&line, &len, fp) != -1)) { if (!is_coordinate (line)) return -1; nrows++; } free(line); // Allocate memory for coordinates matrix *array = (float **) malloc (nrows * sizeof (float *)); for (i = 0; i < nrows; i++) (*array)[i] = (float *) malloc (ncols * sizeof (float)); // Read coordinates from file to coordinates matrix fseek (fp, 0, SEEK_SET); for (i = 0; i < nrows; i++) for (j = 0; j < ncols; j++) if (!fscanf (fp, "%f", &(*array)[i][j])) break; fclose (fp); return nrows; } __device__ void randperm (int n, int perm[], hiprandState_t localState) { int i, j, t; for (i = 0; i < n; i++) { perm[i] = i; } for (i = 0; i < n; i++) { j = hiprand (&localState) % (n - i) + i; t = perm[j]; perm[j] = perm[i]; perm[i] = t; } }
09f7036d5f7eea7e8f26a1dabbfdb20ae9999490.cu
/** @file tsp.c * @brief Traveling Salesman Problem functions. * @author Marcelo Pinto (xmrcl0@gmail.com) * @date 09/12/2017 * @updated 09/21/2017 */ #include "tsp.h" #include "print.h" #include "utils.h" void distance_matrix (float ***coord, float ***distance, int num_city) { int i, j, nrows, ncols; ncols = num_city; nrows = num_city; *distance = (float **) malloc (nrows * sizeof (float *)); for (i = 0; i < nrows; i++) (*distance)[i] = (float *) malloc (ncols * sizeof (float)); for (i = 0; i < num_city; i++) for (j = 0; j < num_city; j++) (*distance)[i][j] = sqrt (pow ((*coord)[i][0] - (*coord)[j][0], 2) + pow ((*coord)[i][1] - (*coord)[j][1], 2)); } void distance_vector (float ***coord, float **distance, int num_city) { int i, j, nrows, ncols; ncols = num_city; nrows = num_city; *distance = (float *) malloc (num_city * num_city * sizeof (float)); for (i = 0; i < num_city; i++) { for (j = 0; j < num_city; j++) { (*distance)[i + j * num_city] = sqrt (pow ((*coord)[i][0] - (*coord)[j][0], 2) + pow ((*coord)[i][1] - (*coord)[j][1], 2)); } } } __global__ void initRNG(curandState *const rngStates, const unsigned int seed) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Initialise the RNG curand_init(seed, tid, 0, &rngStates[tid]); } __device__ void create_path (int num_city, int *coord, curandState localState) { randperm (num_city, coord, localState); } __device__ float measure_path (float *distance, int num_city, int *path) { int i; float l = 0; for (i = 0; i < num_city - 1; i++) { int j = path[i]; int k = path[i + 1]; l = l + distance[j + num_city * k]; } l+= distance[path[0] + num_city * path[num_city - 1]]; return l; } __global__ void kernel (float *const mindists, int *const minpaths, float *const distance, curandState *const rngStates, const int n_cities, const int n_iter) { // Determine thread ID unsigned int bid = blockIdx.x; unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int ltid = threadIdx.x; // Sort 
out shared memory extern __shared__ float sdata[]; float *threadsMinDists = sdata; int *pathMatrix = (int *) &threadsMinDists[blockDim.x]; int *pathBanks[] = {&pathMatrix[2 * ltid * n_cities], &pathMatrix[n_cities * (2 * ltid + 1)]}; // Sort out local(ie this thread's) variables float *curThreadMinDist = &threadsMinDists[ltid]; int minPathBank = 0; curandState localState = rngStates[tid]; //Run everything at least once to initialize a sane minimum path create_path (n_cities, pathBanks[1 - minPathBank], localState); *curThreadMinDist = measure_path (distance, n_cities, pathBanks[1 - minPathBank]); minPathBank = 1 - minPathBank; float curThreadCptDist = 0; for (int i = 1; i < n_iter; i++) { create_path (n_cities, pathBanks[1 - minPathBank], localState); curThreadCptDist = measure_path (distance, n_cities, pathBanks[1- minPathBank]); if (curThreadCptDist < *curThreadMinDist) { *curThreadMinDist = curThreadCptDist; // Well, I actually do care minPathBank = 1 - minPathBank; } } unsigned int minDistTid = reduce_dists(threadsMinDists); if (ltid == minDistTid) { mindists[bid] = threadsMinDists[0]; memcpy(&minpaths[bid * n_cities], pathBanks[minPathBank], sizeof(int) * n_cities); } } __device__ unsigned int reduce_dists(float *const threadsMinDists) { unsigned int ltid = threadIdx.x; __syncthreads(); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) { if (ltid < s) { if (threadsMinDists[ltid + s] < threadsMinDists[ltid]) { threadsMinDists[ltid] = threadsMinDists[ltid + s]; if (s == blockDim.x /2) { threadsMinDists[ltid + s] = ltid + s; } else { threadsMinDists[ltid + s] = threadsMinDists[ltid + s + (s << 1)]; } } else { if (s == blockDim.x /2) { threadsMinDists[ltid + s] = ltid; } else { threadsMinDists[ltid + s] = threadsMinDists[ltid + (s << 1)]; } } } __syncthreads(); } return threadsMinDists[1]; } int read_file (char *file, float ***array) { int i, j, nrows = 0, ncols = 2; char c; char *line = NULL; size_t len=0; FILE *fp; fp = fopen 
(file, "r"); if (fp == NULL) return 0; while ((getline(&line, &len, fp) != -1)) { if (!is_coordinate (line)) return -1; nrows++; } free(line); // Allocate memory for coordinates matrix *array = (float **) malloc (nrows * sizeof (float *)); for (i = 0; i < nrows; i++) (*array)[i] = (float *) malloc (ncols * sizeof (float)); // Read coordinates from file to coordinates matrix fseek (fp, 0, SEEK_SET); for (i = 0; i < nrows; i++) for (j = 0; j < ncols; j++) if (!fscanf (fp, "%f", &(*array)[i][j])) break; fclose (fp); return nrows; } __device__ void randperm (int n, int perm[], curandState localState) { int i, j, t; for (i = 0; i < n; i++) { perm[i] = i; } for (i = 0; i < n; i++) { j = curand (&localState) % (n - i) + i; t = perm[j]; perm[j] = perm[i]; perm[i] = t; } }
02d2869e32467e4bb41cff6075d64995c871587e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/operators/math/sequence_padding.h" namespace paddle { namespace operators { namespace math { template <typename T, bool NormByTimes, bool Padding> __global__ void SequencePaddingKernel(T* padding, T* sequence, const size_t* sequence_start_positions, const size_t sequence_width, const size_t max_sequence_length, const size_t num_sequences) { size_t padding_idx = blockIdx.y; size_t start_pos = sequence_start_positions[padding_idx]; size_t sequence_length = sequence_start_positions[padding_idx + 1] - start_pos; size_t sequence_idx = blockIdx.x * blockDim.y + threadIdx.y; size_t padding_base_idx = (sequence_idx * num_sequences + padding_idx) * sequence_width; size_t sequence_base_idx = (start_pos + sequence_idx) * sequence_width; if (sequence_idx < sequence_length) { T scale = NormByTimes ? 
(1.0f / static_cast<T>(sequence_length)) : 1.0f; if (Padding) { /* sequence -> padding */ for (size_t i = threadIdx.x; i < sequence_width; i += blockDim.x) { padding[padding_base_idx + i] = scale * sequence[sequence_base_idx + i]; } } else { /* padding -> sequence */ for (size_t i = threadIdx.x; i < sequence_width; i += blockDim.x) { sequence[sequence_base_idx + i] = scale * padding[padding_base_idx + i]; } } } else if (sequence_idx < max_sequence_length) { if (Padding) { /* sequence -> padding */ for (size_t i = threadIdx.x; i < sequence_width; i += blockDim.x) { padding[padding_base_idx + i] = 0; } } } } template <typename T> class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::LoDTensor& seq, framework::Tensor* padding, bool norm_by_times) { auto lod = seq.lod(); PADDLE_ENFORCE_GT(lod.size(), 0UL, "The lod of LoDTensor seq should not be null."); const size_t level = 0; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); auto seq_dims = seq.dims(); PADDLE_ENFORCE_EQ(seq_dims[0], static_cast<int64_t>(abs_offset_lod[level].back()), "The first dimension of LoDTensor seq should be " "equal to the sum of all sequences's length."); auto padding_dims = padding->dims(); PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL, "The input padding should be a 3-D Tensor of shape " "[max_sequence_length, num_sequences, sequence_width]."); int64_t max_sequence_length = MaximumSequenceLength(lod, level); PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length, "The first dimension of Tensor padding should be the " "maximum length of all sequences in LoDTensor seq."); const int64_t num_sequences = abs_offset_lod[level].size() - 1; PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences, "The second dimension of Tensor padding should be the " "number of sequences in LoDTensor seq."); const int64_t sequence_width = seq.numel() / seq_dims[0]; PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width, "The 
third dimension of Tensor padding should be the " "width of sequence in LoDTensor seq."); if (!norm_by_times && num_sequences == 1UL) { TensorCopy(seq, context.GetPlace(), context, padding); padding->Resize(padding_dims); return; } const int64_t kBlockSize = 512; /* At least use 32 threads to copy sequence_width elements, * and at least 8 elements for each thread. */ size_t block_dim_x = ::min(((((sequence_width + 7) >> 3) + 31) >> 5) << 5, kBlockSize); size_t block_dim_y = kBlockSize / block_dim_x; dim3 threads(block_dim_x, block_dim_y); size_t grid_dim_x = (max_sequence_length + block_dim_y - 1) / block_dim_y; size_t grid_dim_y = num_sequences; dim3 grid(grid_dim_x, grid_dim_y); const T* seq_data = seq.data<T>(); T* padding_data = padding->data<T>(); if (norm_by_times) { hipLaunchKernelGGL(( SequencePaddingKernel<T, 1, 1>), dim3(grid), dim3(threads), 0, context.stream(), padding_data, const_cast<T*>(seq_data), abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } else { hipLaunchKernelGGL(( SequencePaddingKernel<T, 0, 1>), dim3(grid), dim3(threads), 0, context.stream(), padding_data, const_cast<T*>(seq_data), abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } } }; template <typename T> class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, framework::LoDTensor* seq, const framework::Tensor& padding, bool norm_by_times) { auto lod = seq->lod(); PADDLE_ENFORCE_GT(lod.size(), 0UL, "The lod of LoDTensor seq should not be null."); const size_t level = 0; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); auto seq_dims = seq->dims(); PADDLE_ENFORCE_EQ(seq_dims[0], static_cast<int64_t>(abs_offset_lod[level].back()), "The first dimension of LoDTensor seq should be " "equal to the sum of all sequences's length."); auto padding_dims = padding.dims(); 
PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL, "The input padding should be a 3-D Tensor of shape " "[max_sequnece_length, num_sequences, sequence_width]."); int64_t max_sequence_length = MaximumSequenceLength(lod, level); PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length, "The first dimension of Tensor padding should be " "the maximum length of all sequences in LoDTensor seq."); const int64_t num_sequences = abs_offset_lod[level].size() - 1; PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences, "The second dimension of Tensor padding should be " "the number of sequences in LoDTensor seq."); const int64_t sequence_width = seq->numel() / seq_dims[0]; PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width, "The third dimension of Tensor padding should be the " "width of sequence in LoDTensor seq."); if (!norm_by_times && num_sequences == 1UL) { TensorCopy(padding, context.GetPlace(), context, seq); seq->Resize(seq_dims); return; } const int64_t kBlockSize = 512; /* At least use 32 threads to copy sequence_width elements, * and at least 8 elements for each thread. 
*/ size_t block_dim_x = ::min(((((sequence_width + 7) >> 3) + 31) >> 5) << 5, kBlockSize); size_t block_dim_y = kBlockSize / block_dim_x; dim3 threads(block_dim_x, block_dim_y); size_t grid_dim_x = (max_sequence_length + block_dim_y - 1) / block_dim_y; size_t grid_dim_y = num_sequences; dim3 grid(grid_dim_x, grid_dim_y); const T* padding_data = padding.data<T>(); T* seq_data = seq->data<T>(); if (norm_by_times) { hipLaunchKernelGGL(( SequencePaddingKernel<T, 1, 0>), dim3(grid), dim3(threads), 0, context.stream(), const_cast<T*>(padding_data), seq_data, abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } else { hipLaunchKernelGGL(( SequencePaddingKernel<T, 0, 0>), dim3(grid), dim3(threads), 0, context.stream(), const_cast<T*>(padding_data), seq_data, abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } } }; template class PaddingLoDTensorFunctor<platform::CUDADeviceContext, float>; template class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, float>; } // namespace math } // namespace operators } // namespace paddle
02d2869e32467e4bb41cff6075d64995c871587e.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "paddle/fluid/operators/math/sequence_padding.h" namespace paddle { namespace operators { namespace math { template <typename T, bool NormByTimes, bool Padding> __global__ void SequencePaddingKernel(T* padding, T* sequence, const size_t* sequence_start_positions, const size_t sequence_width, const size_t max_sequence_length, const size_t num_sequences) { size_t padding_idx = blockIdx.y; size_t start_pos = sequence_start_positions[padding_idx]; size_t sequence_length = sequence_start_positions[padding_idx + 1] - start_pos; size_t sequence_idx = blockIdx.x * blockDim.y + threadIdx.y; size_t padding_base_idx = (sequence_idx * num_sequences + padding_idx) * sequence_width; size_t sequence_base_idx = (start_pos + sequence_idx) * sequence_width; if (sequence_idx < sequence_length) { T scale = NormByTimes ? 
(1.0f / static_cast<T>(sequence_length)) : 1.0f; if (Padding) { /* sequence -> padding */ for (size_t i = threadIdx.x; i < sequence_width; i += blockDim.x) { padding[padding_base_idx + i] = scale * sequence[sequence_base_idx + i]; } } else { /* padding -> sequence */ for (size_t i = threadIdx.x; i < sequence_width; i += blockDim.x) { sequence[sequence_base_idx + i] = scale * padding[padding_base_idx + i]; } } } else if (sequence_idx < max_sequence_length) { if (Padding) { /* sequence -> padding */ for (size_t i = threadIdx.x; i < sequence_width; i += blockDim.x) { padding[padding_base_idx + i] = 0; } } } } template <typename T> class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::LoDTensor& seq, framework::Tensor* padding, bool norm_by_times) { auto lod = seq.lod(); PADDLE_ENFORCE_GT(lod.size(), 0UL, "The lod of LoDTensor seq should not be null."); const size_t level = 0; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); auto seq_dims = seq.dims(); PADDLE_ENFORCE_EQ(seq_dims[0], static_cast<int64_t>(abs_offset_lod[level].back()), "The first dimension of LoDTensor seq should be " "equal to the sum of all sequences's length."); auto padding_dims = padding->dims(); PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL, "The input padding should be a 3-D Tensor of shape " "[max_sequence_length, num_sequences, sequence_width]."); int64_t max_sequence_length = MaximumSequenceLength(lod, level); PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length, "The first dimension of Tensor padding should be the " "maximum length of all sequences in LoDTensor seq."); const int64_t num_sequences = abs_offset_lod[level].size() - 1; PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences, "The second dimension of Tensor padding should be the " "number of sequences in LoDTensor seq."); const int64_t sequence_width = seq.numel() / seq_dims[0]; PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width, "The 
third dimension of Tensor padding should be the " "width of sequence in LoDTensor seq."); if (!norm_by_times && num_sequences == 1UL) { TensorCopy(seq, context.GetPlace(), context, padding); padding->Resize(padding_dims); return; } const int64_t kBlockSize = 512; /* At least use 32 threads to copy sequence_width elements, * and at least 8 elements for each thread. */ size_t block_dim_x = std::min(((((sequence_width + 7) >> 3) + 31) >> 5) << 5, kBlockSize); size_t block_dim_y = kBlockSize / block_dim_x; dim3 threads(block_dim_x, block_dim_y); size_t grid_dim_x = (max_sequence_length + block_dim_y - 1) / block_dim_y; size_t grid_dim_y = num_sequences; dim3 grid(grid_dim_x, grid_dim_y); const T* seq_data = seq.data<T>(); T* padding_data = padding->data<T>(); if (norm_by_times) { SequencePaddingKernel<T, 1, 1><<<grid, threads, 0, context.stream()>>>( padding_data, const_cast<T*>(seq_data), abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } else { SequencePaddingKernel<T, 0, 1><<<grid, threads, 0, context.stream()>>>( padding_data, const_cast<T*>(seq_data), abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } } }; template <typename T> class UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, framework::LoDTensor* seq, const framework::Tensor& padding, bool norm_by_times) { auto lod = seq->lod(); PADDLE_ENFORCE_GT(lod.size(), 0UL, "The lod of LoDTensor seq should not be null."); const size_t level = 0; framework::LoD abs_offset_lod = framework::ToAbsOffset(lod); auto seq_dims = seq->dims(); PADDLE_ENFORCE_EQ(seq_dims[0], static_cast<int64_t>(abs_offset_lod[level].back()), "The first dimension of LoDTensor seq should be " "equal to the sum of all sequences's length."); auto padding_dims = padding.dims(); PADDLE_ENFORCE_EQ(padding_dims.size(), 3UL, "The input padding should be a 3-D Tensor 
of shape " "[max_sequnece_length, num_sequences, sequence_width]."); int64_t max_sequence_length = MaximumSequenceLength(lod, level); PADDLE_ENFORCE_EQ(padding_dims[0], max_sequence_length, "The first dimension of Tensor padding should be " "the maximum length of all sequences in LoDTensor seq."); const int64_t num_sequences = abs_offset_lod[level].size() - 1; PADDLE_ENFORCE_EQ(padding_dims[1], num_sequences, "The second dimension of Tensor padding should be " "the number of sequences in LoDTensor seq."); const int64_t sequence_width = seq->numel() / seq_dims[0]; PADDLE_ENFORCE_EQ(padding_dims[2], sequence_width, "The third dimension of Tensor padding should be the " "width of sequence in LoDTensor seq."); if (!norm_by_times && num_sequences == 1UL) { TensorCopy(padding, context.GetPlace(), context, seq); seq->Resize(seq_dims); return; } const int64_t kBlockSize = 512; /* At least use 32 threads to copy sequence_width elements, * and at least 8 elements for each thread. */ size_t block_dim_x = std::min(((((sequence_width + 7) >> 3) + 31) >> 5) << 5, kBlockSize); size_t block_dim_y = kBlockSize / block_dim_x; dim3 threads(block_dim_x, block_dim_y); size_t grid_dim_x = (max_sequence_length + block_dim_y - 1) / block_dim_y; size_t grid_dim_y = num_sequences; dim3 grid(grid_dim_x, grid_dim_y); const T* padding_data = padding.data<T>(); T* seq_data = seq->data<T>(); if (norm_by_times) { SequencePaddingKernel<T, 1, 0><<<grid, threads, 0, context.stream()>>>( const_cast<T*>(padding_data), seq_data, abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } else { SequencePaddingKernel<T, 0, 0><<<grid, threads, 0, context.stream()>>>( const_cast<T*>(padding_data), seq_data, abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } } }; template class PaddingLoDTensorFunctor<platform::CUDADeviceContext, float>; template class 
UnpaddingLoDTensorFunctor<platform::CUDADeviceContext, float>; } // namespace math } // namespace operators } // namespace paddle
4d28a083e2c1dce7814caa58303fdbc491659cea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hiprand/hiprand.h" //Host executed functions for generating numbers on the GPU #include "hiprand/hiprand_kernel.h" //Device executed functions for generating numbers on the GPU #include <stdio.h> #include <iostream> #include <fstream> #include <tuple> #include <string> #include <functional> #include <cstdarg> //template<typename T> //concept Number = std::is_arithmetic<T>::value; bool randomizer_generated = false; //boolean indicating whether or not the randomizer has been setup unsigned long long seed = 10; //default seed value for randomizer hiprandGenerator_t generator; //generator for random matrices //Concepts are not fully supported yet... waiting for C++20 to be more well supported... //template <Number N> template <typename N> struct Matrix { unsigned r; unsigned c; N* arr; /* * Constructs an empty Matrix * * @tparam N type of the values of the Matrix */ __host__ Matrix<N>() {} /* * Constructs a 1x1 Matrix with value val * * @param val Value of the primitive in the Matrix */ __host__ Matrix<N>(N val) { r = 1; c = 1; *arr = val; } /* * Constructs an r by c Matrix and allocates space for the backing pointer arr * * @tparam N type of the values of the Matrix * @param r # of rows * @param c # of columns */ __host__ Matrix<N>(unsigned r, unsigned c) { this->r = r; this->c = c; this->arr = new N[r * c]; } /* * Constructs an r by c Matrix with a given backing pointer that should be in row major form arr[i][j] = arr[i * c + j] * * @param r # of rows * @param c # of columns * @param arr given pointer of values to initalize the Matrix with */ __host__ Matrix<N>(unsigned r, unsigned c, N* arr) { this->r = r; this->c = c; this->arr = arr; } /* * Contrsucts an r by c Matrix using a given list of numbers to simplify the process. 
* Here's an example of this for creating a 3 by 2 matrix with elements: * [ 5 10] * [15 20] * [ 3 7] * * struct Matrix<int> mat = {3, 2, 6, 5, 10, 15, 20, 3, 7} * * @param r # of rows * @param c # of columns * @param num # of elements you want to add following this parameter * @param ... List of elements you want to add formatted such that a_{1}, a_{2}, ... , a_{num} */ __host__ Matrix<N>(unsigned r, unsigned c, int num, ...) { this->r = r; this->c = c; arr = new N[r * c]; std::va_list valist; va_start(valist, num); for (int i = 0; i < num && i < r * c; i++) { arr[i] = va_arg(valist, N); } va_end(valist); } __host__ Matrix(const Matrix& mat) { r = mat.r; c = mat.c; arr = mat.arr; } /* * Automatically allocates memory on the device and based on the parameter copies the backing array from this Matrix onto the device * * @param copy If copy is true copies the backing array from this matrix onto the device * @return The device matrix */ __host__ inline Matrix<N> cudaSetup(bool copy) { struct Matrix<N> dev; dev.r = r; dev.c = c; const size_t size = static_cast<unsigned long long>(r) * c * sizeof(N); hipMalloc(&dev.arr, size); if (copy) { hipMemcpy(dev.arr, this->arr, size, hipMemcpyHostToDevice); } return dev; } /* * Access value in the Matrix at point [i][j] * * @param i row i * @param j column j * @return Value at index [i][j] */ __host__ __device__ inline N& operator()(int i, int j) { return arr[i * c + j]; } /* * Gives a pointer to the head of the ith row in the Matrix * * @param i The ith row * @return A pointer to the ith row */ __host__ __device__ inline N* operator[](int i) { N* ptr = &arr[i * c]; return ptr; } __host__ Matrix<N> operator+(const Matrix<N>& o) { return MatAdd(*this, o); } __host__ void operator+=(const Matrix<N>& o) { (*this) = (*this) + o; } /* * Prints the matrix */ __host__ void print() { for (unsigned i = 0; i < r; i++) { std::cout << '['; for (unsigned j = 0; j < c; j++) { if (j == c - 1) { std::cout << arr[i * c + j]; } else { std::cout << 
arr[i * c + j] << ", "; } } std::cout << "]\n"; } } }; /* * Calculates how many threads per block and the dimension such that: # of threads >= rows * cols * * @param dimBlock Given cuda block to modify. Represents the threads per block * @param dimGrid Given cuda grid to modify. Represents the amount of blocks * @param Number of rows you want to base the calculation off * @param Number of columns you want to base the calculation off */ __host__ void threadCalc(dim3& dimBlock, dim3& dimGrid, unsigned rows, unsigned cols) { if (rows > 32 && cols > 32) { dimBlock.x = 32; dimBlock.y = 32; } else if (rows <= 32 && cols <= 32) { dimBlock.y = (unsigned int)pow(2, floor(log2(rows))); dimBlock.x = (unsigned int)pow(2, floor(log2(cols))); } else if (rows < 32) { dimBlock.y = (unsigned int)pow(2, floor(log2(rows))); dimBlock.x = cols > 32 * (32 / dimBlock.y) ? 32 * (32 / dimBlock.y) : (unsigned int)pow(2, floor(log2(cols))); } else // cols < 32 { dimBlock.x = (unsigned int)pow(2, floor(log2(cols))); dimBlock.y = rows > 32 * (32 / dimBlock.x) ? 32 * (32 / dimBlock.x) : (unsigned int)pow(2, floor(log2(rows))); } dimGrid.x = (cols / dimBlock.x) * dimBlock.x == cols ? cols / dimBlock.x : cols / dimBlock.x + 1; dimGrid.y = (rows / dimBlock.y) * dimBlock.y == rows ? 
rows / dimBlock.y : rows / dimBlock.y + 1; } /* * Creates a matrix containing doubles from [0,1) * * @param rows Amount of rows the matrix will have * @param cols Amount of columns the matrix will have * @return Matrix of type double */ __host__ Matrix<double> randomMatrix(unsigned int rows, unsigned int cols) { struct Matrix<double> mat = { rows, cols }; double* d_A; size_t n = static_cast<unsigned long long>(rows) * cols * sizeof(double); if (!randomizer_generated) { std::fstream file; file.open("seed.txt", std::ios::in); if (file.is_open()) { file >> seed; file.close(); } if (seed < 0) { seed = 0; } unsigned long long nextSeed; if (seed > LLONG_MAX - 5) { nextSeed = 0; } else { nextSeed = seed + 1; } file.open("seed.txt", std::ios::out); if (file.is_open()) { file.clear(); file << nextSeed; } hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator, seed); randomizer_generated = true; } hipMalloc(&d_A, n); hiprandGenerateUniformDouble(generator, d_A, static_cast<size_t>(rows) * cols); hipMemcpy(mat.arr, d_A, n, hipMemcpyDeviceToHost); hipFree(d_A); return mat; } template <typename N> __global__ void transposeK(Matrix<N> A, Matrix<N> B) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < B.r && j < B.c) { B[i][j] = A[j][i]; } } /* * Takes the tranpose of a given matrix * * @param A The given matrix to take the transpose of */ template <typename N> __host__ Matrix<N> transpose(Matrix<N> A) { struct Matrix<N> d_A = A.cudaSetup(true); struct Matrix<N> B = { A.c, A.r }; struct Matrix<N> d_B = B.cudaSetup(false); dim3 dimBlock; dim3 dimGrid; threadCalc(dimBlock, dimGrid, B.r, B.c); transposeK<N> << <dimGrid, dimBlock >> > (d_A, d_B); hipMemcpy(B.arr, d_B.arr, static_cast<unsigned long long>(B.r) * B.c * sizeof(N), hipMemcpyDeviceToHost); hipFree(d_B.arr); hipFree(d_A.arr); return B; } template <typename N> __global__ void kroneckerProductK(Matrix<N> A, Matrix<N> B, 
Matrix<N> C) { //todo Speed this up with shared memory int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < C.r && j < C.c) { C[i][j] = A[i / A.r][j / A.c] * B[i % B.r][j % B.c]; } C(i, j); } /* * Takes the kronecker product (more often called the tensor product) of two matrices. In the order A x B * Note: The kronecker product of two matrices is not commutative! * * @param A The first matrix * @param B THe second matrix */ template <typename N> __host__ Matrix<N> kroneckerProduct(Matrix<N> A, Matrix<N> B) { struct Matrix<N> C = { A.r * B.r, A.c * B.c }; size_t size = static_cast<size_t>(C.r) * C.c * sizeof(N); struct Matrix<N> d_A = A.cudaSetup(true); struct Matrix<N> d_B = B.cudaSetup(true); struct Matrix<N> d_C = C.cudaSetup(false); dim3 dimBlock; dim3 dimGrid; threadCalc(dimBlock, dimGrid, C.r, C.c); kroneckerProductK << <dimGrid, dimBlock >> > (d_A, d_B, d_C); hipMemcpy(C.arr, d_C.arr, size, hipMemcpyDeviceToHost); hipFree(d_C.arr); hipFree(d_B.arr); hipFree(d_C.arr); return C; } //template <Number N> template <typename N> __global__ void MatAddK(Matrix<N> A, Matrix<N> B, Matrix<N> C) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < A.r && j < A.c) { C[i][j] = A[i][j] + B[i][j]; } } //template <Number N> /* * Performs standard matrix addition, where each element is added where C_{i,j} = A_{i,j} + B_{i,j} * * @param A Matrix * @param B Matrix */ template <typename N> __host__ Matrix<N> MatAdd(Matrix<N> A, Matrix<N> B) { if (A.r != B.r || A.c != B.c) { throw std::invalid_argument("The dimension of the matrices don't match"); } struct Matrix<N> C = { A.r, A.c }; size_t size = static_cast<unsigned long long>(A.r) * A.c * sizeof(N); //All matrices are the same size struct Matrix<N> d_A = A.cudaSetup(true); struct Matrix<N> d_B = B.cudaSetup(true); struct Matrix<N> d_C = C.cudaSetup(false); dim3 dimBlock; dim3 dimGrid; threadCalc(dimBlock, dimGrid, C.r, C.c); 
MatAddK<N><< <dimGrid, dimBlock >> > (d_A, d_B, d_C); hipMemcpy(C.arr, d_C.arr, size, hipMemcpyDeviceToHost); hipFree(d_A.arr); hipFree(d_B.arr); hipFree(d_C.arr); return C; } int main() { struct Matrix<int> mat = { 10, 10 }; for (unsigned i = 0; i < mat.r; i++) { for (unsigned j = 0; j < mat.c; j++) { mat[i][j] = i * mat.r + j + 1; } } mat.print(); mat += mat; mat.print(); (randomMatrix(10,10)).print(); std::cout << '\n'; (randomMatrix(10, 10)).print(); std::cout << '\n'; transpose(mat).print(); std::cout << "\n\n"; struct Matrix<int> alpha = { 2, 2 , 4, 0, 5, 6, 7}; struct Matrix<int> beta = { 2, 2, 4, 1, 2, 3, 4 }; beta.print(); alpha.print(); std::cout << "\n\n"; kroneckerProduct(beta, alpha).print(); return 0; }
4d28a083e2c1dce7814caa58303fdbc491659cea.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "curand.h" //Host executed functions for generating numbers on the GPU #include "curand_kernel.h" //Device executed functions for generating numbers on the GPU #include <stdio.h> #include <iostream> #include <fstream> #include <tuple> #include <string> #include <functional> #include <cstdarg> //template<typename T> //concept Number = std::is_arithmetic<T>::value; bool randomizer_generated = false; //boolean indicating whether or not the randomizer has been setup unsigned long long seed = 10; //default seed value for randomizer curandGenerator_t generator; //generator for random matrices //Concepts are not fully supported yet... waiting for C++20 to be more well supported... //template <Number N> template <typename N> struct Matrix { unsigned r; unsigned c; N* arr; /* * Constructs an empty Matrix * * @tparam N type of the values of the Matrix */ __host__ Matrix<N>() {} /* * Constructs a 1x1 Matrix with value val * * @param val Value of the primitive in the Matrix */ __host__ Matrix<N>(N val) { r = 1; c = 1; *arr = val; } /* * Constructs an r by c Matrix and allocates space for the backing pointer arr * * @tparam N type of the values of the Matrix * @param r # of rows * @param c # of columns */ __host__ Matrix<N>(unsigned r, unsigned c) { this->r = r; this->c = c; this->arr = new N[r * c]; } /* * Constructs an r by c Matrix with a given backing pointer that should be in row major form arr[i][j] = arr[i * c + j] * * @param r # of rows * @param c # of columns * @param arr given pointer of values to initalize the Matrix with */ __host__ Matrix<N>(unsigned r, unsigned c, N* arr) { this->r = r; this->c = c; this->arr = arr; } /* * Contrsucts an r by c Matrix using a given list of numbers to simplify the process. 
* Here's an example of this for creating a 3 by 2 matrix with elements: * [ 5 10] * [15 20] * [ 3 7] * * struct Matrix<int> mat = {3, 2, 6, 5, 10, 15, 20, 3, 7} * * @param r # of rows * @param c # of columns * @param num # of elements you want to add following this parameter * @param ... List of elements you want to add formatted such that a_{1}, a_{2}, ... , a_{num} */ __host__ Matrix<N>(unsigned r, unsigned c, int num, ...) { this->r = r; this->c = c; arr = new N[r * c]; std::va_list valist; va_start(valist, num); for (int i = 0; i < num && i < r * c; i++) { arr[i] = va_arg(valist, N); } va_end(valist); } __host__ Matrix(const Matrix& mat) { r = mat.r; c = mat.c; arr = mat.arr; } /* * Automatically allocates memory on the device and based on the parameter copies the backing array from this Matrix onto the device * * @param copy If copy is true copies the backing array from this matrix onto the device * @return The device matrix */ __host__ inline Matrix<N> cudaSetup(bool copy) { struct Matrix<N> dev; dev.r = r; dev.c = c; const size_t size = static_cast<unsigned long long>(r) * c * sizeof(N); cudaMalloc(&dev.arr, size); if (copy) { cudaMemcpy(dev.arr, this->arr, size, cudaMemcpyHostToDevice); } return dev; } /* * Access value in the Matrix at point [i][j] * * @param i row i * @param j column j * @return Value at index [i][j] */ __host__ __device__ inline N& operator()(int i, int j) { return arr[i * c + j]; } /* * Gives a pointer to the head of the ith row in the Matrix * * @param i The ith row * @return A pointer to the ith row */ __host__ __device__ inline N* operator[](int i) { N* ptr = &arr[i * c]; return ptr; } __host__ Matrix<N> operator+(const Matrix<N>& o) { return MatAdd(*this, o); } __host__ void operator+=(const Matrix<N>& o) { (*this) = (*this) + o; } /* * Prints the matrix */ __host__ void print() { for (unsigned i = 0; i < r; i++) { std::cout << '['; for (unsigned j = 0; j < c; j++) { if (j == c - 1) { std::cout << arr[i * c + j]; } else { std::cout 
<< arr[i * c + j] << ", "; } } std::cout << "]\n"; } } }; /* * Calculates how many threads per block and the dimension such that: # of threads >= rows * cols * * @param dimBlock Given cuda block to modify. Represents the threads per block * @param dimGrid Given cuda grid to modify. Represents the amount of blocks * @param Number of rows you want to base the calculation off * @param Number of columns you want to base the calculation off */ __host__ void threadCalc(dim3& dimBlock, dim3& dimGrid, unsigned rows, unsigned cols) { if (rows > 32 && cols > 32) { dimBlock.x = 32; dimBlock.y = 32; } else if (rows <= 32 && cols <= 32) { dimBlock.y = (unsigned int)pow(2, floor(log2(rows))); dimBlock.x = (unsigned int)pow(2, floor(log2(cols))); } else if (rows < 32) { dimBlock.y = (unsigned int)pow(2, floor(log2(rows))); dimBlock.x = cols > 32 * (32 / dimBlock.y) ? 32 * (32 / dimBlock.y) : (unsigned int)pow(2, floor(log2(cols))); } else // cols < 32 { dimBlock.x = (unsigned int)pow(2, floor(log2(cols))); dimBlock.y = rows > 32 * (32 / dimBlock.x) ? 32 * (32 / dimBlock.x) : (unsigned int)pow(2, floor(log2(rows))); } dimGrid.x = (cols / dimBlock.x) * dimBlock.x == cols ? cols / dimBlock.x : cols / dimBlock.x + 1; dimGrid.y = (rows / dimBlock.y) * dimBlock.y == rows ? 
rows / dimBlock.y : rows / dimBlock.y + 1; } /* * Creates a matrix containing doubles from [0,1) * * @param rows Amount of rows the matrix will have * @param cols Amount of columns the matrix will have * @return Matrix of type double */ __host__ Matrix<double> randomMatrix(unsigned int rows, unsigned int cols) { struct Matrix<double> mat = { rows, cols }; double* d_A; size_t n = static_cast<unsigned long long>(rows) * cols * sizeof(double); if (!randomizer_generated) { std::fstream file; file.open("seed.txt", std::ios::in); if (file.is_open()) { file >> seed; file.close(); } if (seed < 0) { seed = 0; } unsigned long long nextSeed; if (seed > LLONG_MAX - 5) { nextSeed = 0; } else { nextSeed = seed + 1; } file.open("seed.txt", std::ios::out); if (file.is_open()) { file.clear(); file << nextSeed; } curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator, seed); randomizer_generated = true; } cudaMalloc(&d_A, n); curandGenerateUniformDouble(generator, d_A, static_cast<size_t>(rows) * cols); cudaMemcpy(mat.arr, d_A, n, cudaMemcpyDeviceToHost); cudaFree(d_A); return mat; } template <typename N> __global__ void transposeK(Matrix<N> A, Matrix<N> B) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < B.r && j < B.c) { B[i][j] = A[j][i]; } } /* * Takes the tranpose of a given matrix * * @param A The given matrix to take the transpose of */ template <typename N> __host__ Matrix<N> transpose(Matrix<N> A) { struct Matrix<N> d_A = A.cudaSetup(true); struct Matrix<N> B = { A.c, A.r }; struct Matrix<N> d_B = B.cudaSetup(false); dim3 dimBlock; dim3 dimGrid; threadCalc(dimBlock, dimGrid, B.r, B.c); transposeK<N> << <dimGrid, dimBlock >> > (d_A, d_B); cudaMemcpy(B.arr, d_B.arr, static_cast<unsigned long long>(B.r) * B.c * sizeof(N), cudaMemcpyDeviceToHost); cudaFree(d_B.arr); cudaFree(d_A.arr); return B; } template <typename N> __global__ void kroneckerProductK(Matrix<N> A, 
Matrix<N> B, Matrix<N> C) { //todo Speed this up with shared memory int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < C.r && j < C.c) { C[i][j] = A[i / A.r][j / A.c] * B[i % B.r][j % B.c]; } C(i, j); } /* * Takes the kronecker product (more often called the tensor product) of two matrices. In the order A x B * Note: The kronecker product of two matrices is not commutative! * * @param A The first matrix * @param B THe second matrix */ template <typename N> __host__ Matrix<N> kroneckerProduct(Matrix<N> A, Matrix<N> B) { struct Matrix<N> C = { A.r * B.r, A.c * B.c }; size_t size = static_cast<size_t>(C.r) * C.c * sizeof(N); struct Matrix<N> d_A = A.cudaSetup(true); struct Matrix<N> d_B = B.cudaSetup(true); struct Matrix<N> d_C = C.cudaSetup(false); dim3 dimBlock; dim3 dimGrid; threadCalc(dimBlock, dimGrid, C.r, C.c); kroneckerProductK << <dimGrid, dimBlock >> > (d_A, d_B, d_C); cudaMemcpy(C.arr, d_C.arr, size, cudaMemcpyDeviceToHost); cudaFree(d_C.arr); cudaFree(d_B.arr); cudaFree(d_C.arr); return C; } //template <Number N> template <typename N> __global__ void MatAddK(Matrix<N> A, Matrix<N> B, Matrix<N> C) { int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < A.r && j < A.c) { C[i][j] = A[i][j] + B[i][j]; } } //template <Number N> /* * Performs standard matrix addition, where each element is added where C_{i,j} = A_{i,j} + B_{i,j} * * @param A Matrix * @param B Matrix */ template <typename N> __host__ Matrix<N> MatAdd(Matrix<N> A, Matrix<N> B) { if (A.r != B.r || A.c != B.c) { throw std::invalid_argument("The dimension of the matrices don't match"); } struct Matrix<N> C = { A.r, A.c }; size_t size = static_cast<unsigned long long>(A.r) * A.c * sizeof(N); //All matrices are the same size struct Matrix<N> d_A = A.cudaSetup(true); struct Matrix<N> d_B = B.cudaSetup(true); struct Matrix<N> d_C = C.cudaSetup(false); dim3 dimBlock; dim3 dimGrid; threadCalc(dimBlock, 
dimGrid, C.r, C.c); MatAddK<N><< <dimGrid, dimBlock >> > (d_A, d_B, d_C); cudaMemcpy(C.arr, d_C.arr, size, cudaMemcpyDeviceToHost); cudaFree(d_A.arr); cudaFree(d_B.arr); cudaFree(d_C.arr); return C; } int main() { struct Matrix<int> mat = { 10, 10 }; for (unsigned i = 0; i < mat.r; i++) { for (unsigned j = 0; j < mat.c; j++) { mat[i][j] = i * mat.r + j + 1; } } mat.print(); mat += mat; mat.print(); (randomMatrix(10,10)).print(); std::cout << '\n'; (randomMatrix(10, 10)).print(); std::cout << '\n'; transpose(mat).print(); std::cout << "\n\n"; struct Matrix<int> alpha = { 2, 2 , 4, 0, 5, 6, 7}; struct Matrix<int> beta = { 2, 2, 4, 1, 2, 3, 4 }; beta.print(); alpha.print(); std::cout << "\n\n"; kroneckerProduct(beta, alpha).print(); return 0; }
df668be5912dd0d8f993138737692333d6687a72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019 ETH Zurich, Simon Frasch * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <algorithm> #include <cassert> #include "gpu_util/gpu_fft_api.hpp" #include "gpu_util/gpu_kernel_parameter.hpp" #include "gpu_util/gpu_runtime.hpp" #include "memory/gpu_array_const_view.hpp" #include "memory/gpu_array_view.hpp" namespace spfft { template <typename T> __global__ static void decompress_kernel( const GPUArrayConstView1D<int> indices, const T* input, GPUArrayView1D<typename gpu::fft::ComplexType<T>::type> output) { // const int stride = gridDim.x * blockDim.x; for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < indices.size(); idx += gridDim.x * blockDim.x) { const int valueIdx = indices(idx); typename gpu::fft::ComplexType<T>::type value; value.x = input[2 * idx]; value.y = input[2 * idx + 1]; output(valueIdx) = value; } } auto decompress_gpu(const gpu::StreamType stream, const GPUArrayView1D<int>& indices, const double* input, GPUArrayView2D<typename gpu::fft::ComplexType<double>::type> output) -> void { assert(indices.size() <= output.size()); const dim3 threadBlock(gpu::BlockSizeMedium); const dim3 threadGrid(::min( static_cast<int>((indices.size() + threadBlock.x - 1) / threadBlock.x), gpu::GridSizeMedium)); // const dim3 threadGrid(indices.size() < 4 ? 
1 : indices.size() / 4); launch_kernel(decompress_kernel<double>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), input, GPUArrayView1D<typename gpu::fft::ComplexType<double>::type>( output.data(), output.size(), output.device_id())); } auto decompress_gpu(const gpu::StreamType stream, const GPUArrayView1D<int>& indices, const float* input, GPUArrayView2D<typename gpu::fft::ComplexType<float>::type> output) -> void { assert(indices.size() <= output.size()); const dim3 threadBlock(gpu::BlockSizeMedium); const dim3 threadGrid(::min( static_cast<int>((indices.size() + threadBlock.x - 1) / threadBlock.x), gpu::GridSizeMedium)); launch_kernel(decompress_kernel<float>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), input, GPUArrayView1D<typename gpu::fft::ComplexType<float>::type>( output.data(), output.size(), output.device_id())); } template <typename T> __global__ static void compress_kernel( const GPUArrayConstView1D<int> indices, GPUArrayConstView1D<typename gpu::fft::ComplexType<T>::type> input, T* output) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < indices.size(); idx += gridDim.x * blockDim.x) { const int valueIdx = indices(idx); const auto value = input(valueIdx); output[2 * idx] = value.x; output[2 * idx + 1] = value.y; } } template <typename T> __global__ static void compress_kernel_scaled( const GPUArrayConstView1D<int> indices, GPUArrayConstView1D<typename gpu::fft::ComplexType<T>::type> input, T* output, const T scalingFactor) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < indices.size(); idx += gridDim.x * blockDim.x) { const int valueIdx = indices(idx); const auto value = input(valueIdx); output[2 * idx] = scalingFactor * value.x; output[2 * idx + 1] = scalingFactor * value.y; } } auto compress_gpu(const gpu::StreamType stream, const GPUArrayView1D<int>& indices, GPUArrayView2D<typename gpu::fft::ComplexType<double>::type> input, double* output, const bool useScaling, const 
double scalingFactor) -> void { const dim3 threadBlock(gpu::BlockSizeMedium); const dim3 threadGrid(::min( static_cast<int>((indices.size() + threadBlock.x - 1) / threadBlock.x), gpu::GridSizeMedium)); if (useScaling) { launch_kernel(compress_kernel_scaled<double>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), GPUArrayConstView1D<typename gpu::fft::ComplexType<double>::type>( input.data(), input.size(), input.device_id()), output, scalingFactor); } else { launch_kernel(compress_kernel<double>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), GPUArrayConstView1D<typename gpu::fft::ComplexType<double>::type>( input.data(), input.size(), input.device_id()), output); } } auto compress_gpu(const gpu::StreamType stream, const GPUArrayView1D<int>& indices, GPUArrayView2D<typename gpu::fft::ComplexType<float>::type> input, float* output, const bool useScaling, const float scalingFactor) -> void { const dim3 threadBlock(gpu::BlockSizeMedium); const dim3 threadGrid(::min( static_cast<int>((indices.size() + threadBlock.x - 1) / threadBlock.x), gpu::GridSizeMedium)); if (useScaling) { launch_kernel(compress_kernel_scaled<float>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), GPUArrayConstView1D<typename gpu::fft::ComplexType<float>::type>( input.data(), input.size(), input.device_id()), output, scalingFactor); } else { launch_kernel(compress_kernel<float>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), GPUArrayConstView1D<typename gpu::fft::ComplexType<float>::type>( input.data(), input.size(), input.device_id()), output); } } } // namespace spfft
df668be5912dd0d8f993138737692333d6687a72.cu
/* * Copyright (c) 2019 ETH Zurich, Simon Frasch * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <algorithm> #include <cassert> #include "gpu_util/gpu_fft_api.hpp" #include "gpu_util/gpu_kernel_parameter.hpp" #include "gpu_util/gpu_runtime.hpp" #include "memory/gpu_array_const_view.hpp" #include "memory/gpu_array_view.hpp" namespace spfft { template <typename T> __global__ static void decompress_kernel( const GPUArrayConstView1D<int> indices, const T* input, GPUArrayView1D<typename gpu::fft::ComplexType<T>::type> output) { // const int stride = gridDim.x * blockDim.x; for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < indices.size(); idx += gridDim.x * blockDim.x) { const int valueIdx = indices(idx); typename gpu::fft::ComplexType<T>::type value; value.x = input[2 * idx]; value.y = input[2 * idx + 1]; output(valueIdx) = value; } } auto decompress_gpu(const gpu::StreamType stream, const GPUArrayView1D<int>& indices, const double* input, GPUArrayView2D<typename gpu::fft::ComplexType<double>::type> output) -> void { assert(indices.size() <= output.size()); const dim3 threadBlock(gpu::BlockSizeMedium); const dim3 threadGrid(std::min( static_cast<int>((indices.size() + threadBlock.x - 1) / threadBlock.x), gpu::GridSizeMedium)); // const dim3 threadGrid(indices.size() < 4 ? 
1 : indices.size() / 4); launch_kernel(decompress_kernel<double>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), input, GPUArrayView1D<typename gpu::fft::ComplexType<double>::type>( output.data(), output.size(), output.device_id())); } auto decompress_gpu(const gpu::StreamType stream, const GPUArrayView1D<int>& indices, const float* input, GPUArrayView2D<typename gpu::fft::ComplexType<float>::type> output) -> void { assert(indices.size() <= output.size()); const dim3 threadBlock(gpu::BlockSizeMedium); const dim3 threadGrid(std::min( static_cast<int>((indices.size() + threadBlock.x - 1) / threadBlock.x), gpu::GridSizeMedium)); launch_kernel(decompress_kernel<float>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), input, GPUArrayView1D<typename gpu::fft::ComplexType<float>::type>( output.data(), output.size(), output.device_id())); } template <typename T> __global__ static void compress_kernel( const GPUArrayConstView1D<int> indices, GPUArrayConstView1D<typename gpu::fft::ComplexType<T>::type> input, T* output) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < indices.size(); idx += gridDim.x * blockDim.x) { const int valueIdx = indices(idx); const auto value = input(valueIdx); output[2 * idx] = value.x; output[2 * idx + 1] = value.y; } } template <typename T> __global__ static void compress_kernel_scaled( const GPUArrayConstView1D<int> indices, GPUArrayConstView1D<typename gpu::fft::ComplexType<T>::type> input, T* output, const T scalingFactor) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < indices.size(); idx += gridDim.x * blockDim.x) { const int valueIdx = indices(idx); const auto value = input(valueIdx); output[2 * idx] = scalingFactor * value.x; output[2 * idx + 1] = scalingFactor * value.y; } } auto compress_gpu(const gpu::StreamType stream, const GPUArrayView1D<int>& indices, GPUArrayView2D<typename gpu::fft::ComplexType<double>::type> input, double* output, const bool useScaling, const 
double scalingFactor) -> void { const dim3 threadBlock(gpu::BlockSizeMedium); const dim3 threadGrid(std::min( static_cast<int>((indices.size() + threadBlock.x - 1) / threadBlock.x), gpu::GridSizeMedium)); if (useScaling) { launch_kernel(compress_kernel_scaled<double>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), GPUArrayConstView1D<typename gpu::fft::ComplexType<double>::type>( input.data(), input.size(), input.device_id()), output, scalingFactor); } else { launch_kernel(compress_kernel<double>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), GPUArrayConstView1D<typename gpu::fft::ComplexType<double>::type>( input.data(), input.size(), input.device_id()), output); } } auto compress_gpu(const gpu::StreamType stream, const GPUArrayView1D<int>& indices, GPUArrayView2D<typename gpu::fft::ComplexType<float>::type> input, float* output, const bool useScaling, const float scalingFactor) -> void { const dim3 threadBlock(gpu::BlockSizeMedium); const dim3 threadGrid(std::min( static_cast<int>((indices.size() + threadBlock.x - 1) / threadBlock.x), gpu::GridSizeMedium)); if (useScaling) { launch_kernel(compress_kernel_scaled<float>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), GPUArrayConstView1D<typename gpu::fft::ComplexType<float>::type>( input.data(), input.size(), input.device_id()), output, scalingFactor); } else { launch_kernel(compress_kernel<float>, threadGrid, threadBlock, 0, stream, GPUArrayConstView1D<int>(indices), GPUArrayConstView1D<typename gpu::fft::ComplexType<float>::type>( input.data(), input.size(), input.device_id()), output); } } } // namespace spfft
9335dbe86b014a09af8f01fbd7ed93f51bef4e43.hip
// !!! This is a file automatically generated by hipify!!! #include "book.h" int main( void ) { hipDeviceProp_t prop; int count; HANDLE_ERROR( hipGetDeviceCount( &count ) ); for (int i=0; i< count; i++) { HANDLE_ERROR( hipGetDeviceProperties( &prop, i ) ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n"); printf( "Kernel execution timeout : " ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } }
9335dbe86b014a09af8f01fbd7ed93f51bef4e43.cu
#include "book.h" int main( void ) { cudaDeviceProp prop; int count; HANDLE_ERROR( cudaGetDeviceCount( &count ) ); for (int i=0; i< count; i++) { HANDLE_ERROR( cudaGetDeviceProperties( &prop, i ) ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n"); printf( "Kernel execution timeout : " ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } }
951d71cace370ca1e0d2e6557893c8c4e9dc6cfc.hip
// !!! This is a file automatically generated by hipify!!! #include "common_hip.cuh"
951d71cace370ca1e0d2e6557893c8c4e9dc6cfc.cu
#include "common.cuh"
54c6edbf115153e8d209970c622e256c32a8588b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/TensorTransformations.h> #include <ATen/Dispatch.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <c10/macros/Macros.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty_like.h> #include <ATen/ops/roll_native.h> #endif #include <cstddef> #include <vector> namespace at { namespace native { template <typename scalar_t, typename IndexType> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM()) #endif __global__ void kernel_pointwise_flip_apply2( const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info, cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info, IndexType N, int flip_dim, IndexType total_dims) { for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) { IndexType dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0]; } else { // flip last dim IndexType i = total_dims - 1; dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]); } out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index]; } } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void flip_cuda_kernel( scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if 
(linear_index >= N) { return; } int64_t cur_indices = linear_index, rem = 0, dst_offset = 0; for (int64_t i = 0; i < total_dims; i++) { int64_t temp = cur_indices; cur_indices = cur_indices / strides_contiguous[i]; rem = temp - cur_indices * strides_contiguous[i]; // flip the indices if it is in flip_dims for (int64_t j = 0; j < flip_dims_size; j++) { if (i == flip_dims[j]) { cur_indices = shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * strides[i]; cur_indices = rem; } out_tensor[linear_index] = in_tensor[dst_offset]; } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void roll_cuda_kernel( scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t roll_dim, int64_t start, int64_t size, int64_t stride, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } // roll dim idx is the index of linear_index along the rolling dimension. int64_t roll_dim_idx = linear_index % (stride * size) / stride; // index into the source data to find appropriate value. int64_t source_idx = 0; if( roll_dim_idx >= (size - start) ) { source_idx = linear_index - ((size - start) * stride); } else { source_idx = linear_index + (start * stride); } out_tensor[linear_index] = in_tensor[source_idx]; } // Roll a tensor along a dimension Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) { if (dims.size() != 1 || shifts.size() != 1) { return roll_common(self, shifts, dims); } auto in_tensor = self; if(!self.is_contiguous()) { in_tensor = self.contiguous(); } auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT); if (out_tensor.numel() == 0) { return out_tensor; } const int64_t N = in_tensor.numel(); const int64_t dim = dims[0]; const int64_t size = in_tensor.size(dim); int64_t start = (size - shifts[0]) % size; // Behavior of % is different in C++ vs Python for negative numbers. This // corrects the difference. 
if( start < 0 ) start = start + size; dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid"); auto total_dims = in_tensor.dim(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::ComplexHalf, in_tensor.scalar_type(), "roll_cuda", [&] { hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N, dim, start, size, in_tensor.stride(dim), total_dims); C10_HIP_KERNEL_LAUNCH_CHECK(); }); return out_tensor; } }} // namespace at::native
54c6edbf115153e8d209970c622e256c32a8588b.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/native/TensorTransformations.h> #include <ATen/Dispatch.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <c10/macros/Macros.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty_like.h> #include <ATen/ops/roll_native.h> #endif #include <cstddef> #include <vector> namespace at { namespace native { template <typename scalar_t, typename IndexType> #if __CUDA_ARCH__ >= 350 || defined(USE_ROCM) C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM()) #endif __global__ void kernel_pointwise_flip_apply2( const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info, cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info, IndexType N, int flip_dim, IndexType total_dims) { for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) { IndexType dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0]; } else { // flip last dim IndexType i = total_dims - 1; dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]); } out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index]; } } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void flip_cuda_kernel( scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } int64_t cur_indices = linear_index, rem = 0, 
dst_offset = 0; for (int64_t i = 0; i < total_dims; i++) { int64_t temp = cur_indices; cur_indices = cur_indices / strides_contiguous[i]; rem = temp - cur_indices * strides_contiguous[i]; // flip the indices if it is in flip_dims for (int64_t j = 0; j < flip_dims_size; j++) { if (i == flip_dims[j]) { cur_indices = shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * strides[i]; cur_indices = rem; } out_tensor[linear_index] = in_tensor[dst_offset]; } template <typename scalar_t> C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize()) __global__ void roll_cuda_kernel( scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t roll_dim, int64_t start, int64_t size, int64_t stride, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } // roll dim idx is the index of linear_index along the rolling dimension. int64_t roll_dim_idx = linear_index % (stride * size) / stride; // index into the source data to find appropriate value. int64_t source_idx = 0; if( roll_dim_idx >= (size - start) ) { source_idx = linear_index - ((size - start) * stride); } else { source_idx = linear_index + (start * stride); } out_tensor[linear_index] = in_tensor[source_idx]; } // Roll a tensor along a dimension Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) { if (dims.size() != 1 || shifts.size() != 1) { return roll_common(self, shifts, dims); } auto in_tensor = self; if(!self.is_contiguous()) { in_tensor = self.contiguous(); } auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT); if (out_tensor.numel() == 0) { return out_tensor; } const int64_t N = in_tensor.numel(); const int64_t dim = dims[0]; const int64_t size = in_tensor.size(dim); int64_t start = (size - shifts[0]) % size; // Behavior of % is different in C++ vs Python for negative numbers. This // corrects the difference. 
if( start < 0 ) start = start + size; dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid"); auto total_dims = in_tensor.dim(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, at::ScalarType::ComplexHalf, in_tensor.scalar_type(), "roll_cuda", [&] { roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N, dim, start, size, in_tensor.stride(dim), total_dims); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); return out_tensor; } }} // namespace at::native
7b51a9e32f4eb8fb961070cccca4ba3153b22aeb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/histogram_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/eigen/eigen_function.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { using IndexType = int64_t; using paddle::platform::PADDLE_CUDA_NUM_THREADS; inline int GET_BLOCKS(const int N) { return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS; } template <typename T, typename IndexType> __device__ static IndexType GetBin(T input_value, T min_value, T max_value, int64_t nbins) { IndexType bin = static_cast<int>((input_value - min_value) * nbins / (max_value - min_value)); IndexType output_index = bin < nbins - 1 ? 
bin : nbins - 1; return output_index; } template <typename T, typename IndexType> __global__ void KernelHistogram(const T* input, const int total_elements, const int64_t nbins, const T min_value, const T max_value, int64_t* output) { extern __shared__ int64_t buf_hist[]; for (int i = threadIdx.x; i < nbins; i += blockDim.x) { buf_hist[i] = 0; } __syncthreads(); CUDA_KERNEL_LOOP(input_index, total_elements) { // const IndexType input_index = threadIdx.x + blockIdx.x * blockDim.x; const auto input_value = input[input_index]; if (input_value >= min_value && input_value <= max_value) { const IndexType output_index = GetBin<T, IndexType>(input_value, min_value, max_value, nbins); paddle::platform::CudaAtomicAdd(&buf_hist[output_index], 1); } } __syncthreads(); for (int i = threadIdx.x; i < nbins; i += blockDim.x) { paddle::platform::CudaAtomicAdd(&output[i], buf_hist[i]); } } template <typename T, typename Context> void HistogramKernel(const Context& dev_ctx, const DenseTensor& input, int64_t bins, int min, int max, DenseTensor* output) { auto& nbins = bins; auto& minval = min; auto& maxval = max; const T* input_data = input.data<T>(); const int input_numel = input.numel(); int64_t* out_data = output->mutable_data<int64_t>(dev_ctx.GetPlace()); phi::funcs::SetConstant<Context, int64_t>()( dev_ctx, output, static_cast<int64_t>(0)); if (input_data == nullptr) return; T output_min = static_cast<T>(minval); T output_max = static_cast<T>(maxval); if (output_min == output_max) { auto input_x = phi::EigenVector<T>::Flatten(input); DenseTensor input_min_t, input_max_t; auto* input_min_data = input_min_t.mutable_data<T>({1}, dev_ctx.GetPlace()); auto* input_max_data = input_max_t.mutable_data<T>({1}, dev_ctx.GetPlace()); auto input_min_scala = phi::EigenScalar<T>::From(input_min_t); auto input_max_scala = phi::EigenScalar<T>::From(input_max_t); auto* place = dev_ctx.eigen_device(); input_min_scala.device(*place) = input_x.minimum(); input_max_scala.device(*place) = 
input_x.maximum(); DenseTensor input_min_cpu, input_max_cpu; paddle::framework::TensorCopySync( input_min_t, phi::CPUPlace(), &input_min_cpu); paddle::framework::TensorCopySync( input_max_t, phi::CPUPlace(), &input_max_cpu); output_min = input_min_cpu.data<T>()[0]; output_max = input_max_cpu.data<T>()[0]; } if (output_min == output_max) { output_min = output_min - 1; output_max = output_max + 1; } PADDLE_ENFORCE_EQ((std::isinf(static_cast<float>(output_min)) || std::isnan(static_cast<float>(output_max)) || std::isinf(static_cast<float>(output_min)) || std::isnan(static_cast<float>(output_max))), false, phi::errors::OutOfRange("range of min, max is not finite")); PADDLE_ENFORCE_GE( output_max, output_min, phi::errors::InvalidArgument( "max must be larger or equal to min. If min and max are both zero, " "the minimum and maximum values of the data are used. " "But received max is %d, min is %d", maxval, minval)); auto stream = dev_ctx.stream(); hipLaunchKernelGGL(( KernelHistogram<T, IndexType>), dim3(GET_BLOCKS(input_numel)), dim3(PADDLE_CUDA_NUM_THREADS), nbins * sizeof(int64_t), stream, input_data, input_numel, nbins, output_min, output_max, out_data); } } // namespace phi PD_REGISTER_KERNEL(histogram, GPU, ALL_LAYOUT, phi::HistogramKernel, float, double, int, int64_t) {}
7b51a9e32f4eb8fb961070cccca4ba3153b22aeb.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/histogram_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/eigen/eigen_function.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { using IndexType = int64_t; using paddle::platform::PADDLE_CUDA_NUM_THREADS; inline int GET_BLOCKS(const int N) { return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS; } template <typename T, typename IndexType> __device__ static IndexType GetBin(T input_value, T min_value, T max_value, int64_t nbins) { IndexType bin = static_cast<int>((input_value - min_value) * nbins / (max_value - min_value)); IndexType output_index = bin < nbins - 1 ? 
bin : nbins - 1; return output_index; } template <typename T, typename IndexType> __global__ void KernelHistogram(const T* input, const int total_elements, const int64_t nbins, const T min_value, const T max_value, int64_t* output) { extern __shared__ int64_t buf_hist[]; for (int i = threadIdx.x; i < nbins; i += blockDim.x) { buf_hist[i] = 0; } __syncthreads(); CUDA_KERNEL_LOOP(input_index, total_elements) { // const IndexType input_index = threadIdx.x + blockIdx.x * blockDim.x; const auto input_value = input[input_index]; if (input_value >= min_value && input_value <= max_value) { const IndexType output_index = GetBin<T, IndexType>(input_value, min_value, max_value, nbins); paddle::platform::CudaAtomicAdd(&buf_hist[output_index], 1); } } __syncthreads(); for (int i = threadIdx.x; i < nbins; i += blockDim.x) { paddle::platform::CudaAtomicAdd(&output[i], buf_hist[i]); } } template <typename T, typename Context> void HistogramKernel(const Context& dev_ctx, const DenseTensor& input, int64_t bins, int min, int max, DenseTensor* output) { auto& nbins = bins; auto& minval = min; auto& maxval = max; const T* input_data = input.data<T>(); const int input_numel = input.numel(); int64_t* out_data = output->mutable_data<int64_t>(dev_ctx.GetPlace()); phi::funcs::SetConstant<Context, int64_t>()( dev_ctx, output, static_cast<int64_t>(0)); if (input_data == nullptr) return; T output_min = static_cast<T>(minval); T output_max = static_cast<T>(maxval); if (output_min == output_max) { auto input_x = phi::EigenVector<T>::Flatten(input); DenseTensor input_min_t, input_max_t; auto* input_min_data = input_min_t.mutable_data<T>({1}, dev_ctx.GetPlace()); auto* input_max_data = input_max_t.mutable_data<T>({1}, dev_ctx.GetPlace()); auto input_min_scala = phi::EigenScalar<T>::From(input_min_t); auto input_max_scala = phi::EigenScalar<T>::From(input_max_t); auto* place = dev_ctx.eigen_device(); input_min_scala.device(*place) = input_x.minimum(); input_max_scala.device(*place) = 
input_x.maximum(); DenseTensor input_min_cpu, input_max_cpu; paddle::framework::TensorCopySync( input_min_t, phi::CPUPlace(), &input_min_cpu); paddle::framework::TensorCopySync( input_max_t, phi::CPUPlace(), &input_max_cpu); output_min = input_min_cpu.data<T>()[0]; output_max = input_max_cpu.data<T>()[0]; } if (output_min == output_max) { output_min = output_min - 1; output_max = output_max + 1; } PADDLE_ENFORCE_EQ((std::isinf(static_cast<float>(output_min)) || std::isnan(static_cast<float>(output_max)) || std::isinf(static_cast<float>(output_min)) || std::isnan(static_cast<float>(output_max))), false, phi::errors::OutOfRange("range of min, max is not finite")); PADDLE_ENFORCE_GE( output_max, output_min, phi::errors::InvalidArgument( "max must be larger or equal to min. If min and max are both zero, " "the minimum and maximum values of the data are used. " "But received max is %d, min is %d", maxval, minval)); auto stream = dev_ctx.stream(); KernelHistogram<T, IndexType><<<GET_BLOCKS(input_numel), PADDLE_CUDA_NUM_THREADS, nbins * sizeof(int64_t), stream>>>( input_data, input_numel, nbins, output_min, output_max, out_data); } } // namespace phi PD_REGISTER_KERNEL(histogram, GPU, ALL_LAYOUT, phi::HistogramKernel, float, double, int, int64_t) {}
a7ac1b203c00f566678996bf1d68977795daf655.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<assert.h> #define N 4 #define THREADSPERBLOCK 4 __global__ void matmult( double * d_A, double * d_B, double * d_C ){ __shared__ double Arow[N]; int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx<N*N){ Arow[threadIdx.x]=d_A[idx]; } __syncthreads(); for(int k = 0; k < N; k++ ) { d_C[idx] += Arow[k] *d_B[k * N + threadIdx.x]; } } void PRINT_MAT(int P, int M, double * matr){ for(int j = 0; j < P; j++ ){ for(int i = 0; i < M; i++ ){ printf("%f ",matr[i+j*M]); } printf("\n"); } } int main(){ double * h_A , * h_B, * h_C; double * d_A , * d_B, * d_C; size_t matsize = N * N * sizeof(double); //long integer h_A = (double *) malloc( matsize ); h_B = (double *) malloc( matsize ); h_C = (double *) malloc( matsize ); hipMalloc((void**) &d_A, matsize ); hipMalloc((void**) &d_B, matsize ); hipMalloc((void**) &d_C, matsize ); for(int i=0;i<N*N;i++){ h_A[i]=( rand() % 100 + 1 ); //(double )i; h_B[i]=( rand() % 100 + 1 ); //(double )i; h_C[i]=0.; } printf("matrice A:\n"); PRINT_MAT(N,N,h_A); printf("matrice B:\n"); PRINT_MAT(N,N,h_B); hipMemcpy( d_A, h_A, matsize, hipMemcpyHostToDevice ); hipMemcpy( d_B, h_B, matsize, hipMemcpyHostToDevice ); hipMemcpy( d_C, h_C, matsize, hipMemcpyHostToDevice ); dim3 blockDim(THREADSPERBLOCK, THREADSPERBLOCK); dim3 gridDim((N*N)/THREADSPERBLOCK, (N*N)/THREADSPERBLOCK); hipLaunchKernelGGL(( matmult), dim3(gridDim), dim3(blockDim) , 0, 0, d_A, d_B, d_C); hipMemcpy( h_C, d_C, matsize, hipMemcpyDeviceToHost ); printf("matrice C=A*B:\n"); PRINT_MAT(N,N,h_C); free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); return 0; }
a7ac1b203c00f566678996bf1d68977795daf655.cu
#include<stdio.h> #include<assert.h> #define N 4 #define THREADSPERBLOCK 4 __global__ void matmult( double * d_A, double * d_B, double * d_C ){ __shared__ double Arow[N]; int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx<N*N){ Arow[threadIdx.x]=d_A[idx]; } __syncthreads(); for(int k = 0; k < N; k++ ) { d_C[idx] += Arow[k] *d_B[k * N + threadIdx.x]; } } void PRINT_MAT(int P, int M, double * matr){ for(int j = 0; j < P; j++ ){ for(int i = 0; i < M; i++ ){ printf("%f ",matr[i+j*M]); } printf("\n"); } } int main(){ double * h_A , * h_B, * h_C; double * d_A , * d_B, * d_C; size_t matsize = N * N * sizeof(double); //long integer h_A = (double *) malloc( matsize ); h_B = (double *) malloc( matsize ); h_C = (double *) malloc( matsize ); cudaMalloc((void**) &d_A, matsize ); cudaMalloc((void**) &d_B, matsize ); cudaMalloc((void**) &d_C, matsize ); for(int i=0;i<N*N;i++){ h_A[i]=( rand() % 100 + 1 ); //(double )i; h_B[i]=( rand() % 100 + 1 ); //(double )i; h_C[i]=0.; } printf("matrice A:\n"); PRINT_MAT(N,N,h_A); printf("matrice B:\n"); PRINT_MAT(N,N,h_B); cudaMemcpy( d_A, h_A, matsize, cudaMemcpyHostToDevice ); cudaMemcpy( d_B, h_B, matsize, cudaMemcpyHostToDevice ); cudaMemcpy( d_C, h_C, matsize, cudaMemcpyHostToDevice ); dim3 blockDim(THREADSPERBLOCK, THREADSPERBLOCK); dim3 gridDim((N*N)/THREADSPERBLOCK, (N*N)/THREADSPERBLOCK); matmult<<< gridDim, blockDim >>>( d_A, d_B, d_C); cudaMemcpy( h_C, d_C, matsize, cudaMemcpyDeviceToHost ); printf("matrice C=A*B:\n"); PRINT_MAT(N,N,h_C); free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); return 0; }
95f8b87f9f3fc60b675ff381c9beb8338e140a49.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/time.h> #include <GL/glut.h> #define W 800 #define H 600 int max_steps = 10; double left = -2.5; double right = 1.5; double top = 1.5; double bottom = -1.5; double get_time() { double t ; // struct timeval* ptr = (struct timeval*)malloc( sizeof(struct timeval) ) ; // gettimeofday( ptr , NULL ) ; // second argument is time zone... NULL // t = ptr->tv_sec * 1000000.0 + ptr->tv_usec ; // free( ptr ) ; // return t / 1000000.0 ; } typedef struct { double real; double im; } ImNum; __device__ ImNum next_num(ImNum z, double a, double b){ ImNum z_i = {pow(z.real, 2) - pow(z.im, 2) + a, 2*z.real*z.im + b}; return z_i; } __global__ void run_pixel(double a, double hscale, double bottom, int max_steps, float *red, float *green, float *blue){ int y = threadIdx.x; ImNum curr_z = {0, 0}; double b = bottom + (y * hscale); int steps = 0; for(int i = 0; i < max_steps; i++){ curr_z = next_num(curr_z, a, b); if(hypot(curr_z.real, curr_z.im) > 2){ break; } steps++; } if(steps != max_steps){ red[y] = (steps*1.0/max_steps); green[y] = 1-(steps*1.0/max_steps); blue[y] = (steps*1.0/max_steps); } } void displayfunc() { double hscale = fabs(top-bottom)/H; double wscale = fabs(right-left)/W; glClear(GL_COLOR_BUFFER_BIT); double start = get_time(); for(int x = 0; x < W; x++){ double a = left + (x * wscale); float *red, *green, *blue; float *red_device, *green_device, *blue_device; red = (float*)malloc(H*sizeof(float)); blue = (float*)malloc(H*sizeof(float)); green = (float*)malloc(H*sizeof(float)); hipMalloc((void**)&red_device, sizeof(float)*H); hipMalloc((void**)&blue_device, sizeof(float)*H); hipMalloc((void**)&green_device, sizeof(float)*H); for(int i = 0; i < H; i++){ red[i] = 1; blue[i] = 1; green[i] = 1; } hipMemcpy(red_device, red, sizeof(float)*H, hipMemcpyHostToDevice); hipMemcpy(blue_device, blue, 
sizeof(float)*H, hipMemcpyHostToDevice); hipMemcpy(green_device, green, sizeof(float)*H, hipMemcpyHostToDevice); dim3 dimGrid(1), dimBlock(H); hipLaunchKernelGGL(( run_pixel), dim3(dimGrid),dim3(dimBlock), 0, 0, a, hscale, bottom, max_steps, red_device, green_device, blue_device); hipMemcpy(red, red_device, sizeof(float)*H, hipMemcpyDeviceToHost); hipMemcpy(blue, blue_device, sizeof(float)*H, hipMemcpyDeviceToHost); hipMemcpy(green, green_device, sizeof(float)*H, hipMemcpyDeviceToHost); for(int y = 0; y < H; y++){ glColor3f(red[y], green[y], blue[y]); glBegin(GL_POINTS); glVertex2f(x,y); glEnd(); } free(red); free(blue); free(green); hipFree(red_device); hipFree(blue_device); hipFree(green_device); } glutSwapBuffers(); double end = get_time(); printf("dx: %f\ndy: %f\n", fabs(right-left), fabs(top-bottom)); printf("max steps: %d\n", max_steps); printf("time: %f\n", end - start); printf("***********************************\n"); } void reshapefunc(int wscr,int hscr) { glViewport(0,0,(GLsizei)W,(GLsizei)H); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0.0,1.0*W,0.0,1.0*H); // always a square glMatrixMode(GL_MODELVIEW); } void mousefunc(int button,int state,int xscr,int yscr) { if(button == 3 && state == GLUT_DOWN){ max_steps *= 2; } if(button == 4 && state == GLUT_DOWN){ max_steps /= 2; } if(button == GLUT_LEFT_BUTTON && state == GLUT_DOWN){ double hscale = fabs(top-bottom)/H; double wscale = fabs(right-left)/W; double x_coord = left + (xscr * wscale); double y_coord = bottom + ((H-yscr) * hscale); double temp_left = x_coord - (fabs(right-left)/4); double temp_right = x_coord + (fabs(right-left)/4); left = temp_left; right = temp_right; double temp_bottom = y_coord - fabs(top-bottom)/4; double temp_top = y_coord + fabs(top-bottom)/4; bottom = temp_bottom; top = temp_top; } if(button == GLUT_RIGHT_BUTTON && state == GLUT_DOWN){ double hscale = fabs(top-bottom)/H; double wscale = fabs(right-left)/W; double x_coord = left + (xscr * wscale); double y_coord = 
bottom + ((H-yscr) * hscale); double temp_left = x_coord - (fabs(right-left)); double temp_right = x_coord + (fabs(right-left)); left = temp_left; right = temp_right; double temp_bottom = y_coord - fabs(top-bottom); double temp_top = y_coord + fabs(top-bottom); bottom = temp_bottom; top = temp_top; } glutPostRedisplay(); } void keyfunc(unsigned char key,int xscr,int yscr) { double hscale = fabs(top-bottom)/H; double wscale = fabs(right-left)/W; if(key == 'a'){ left -= (W/10)*wscale; right -= (W/10)*wscale; } if(key == 'd'){ left += (W/10)*wscale; right += (W/10)*wscale; } if(key == 'w'){ bottom += (H/10)*hscale; top += (H/10)*hscale; } if(key == 's'){ bottom -= (H/10)*hscale; top -= (H/10)*hscale; } glutPostRedisplay(); } int main(int argc,char* argv[]) { glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); glutInitWindowSize(W, H); glutInitWindowPosition(100,50); glutCreateWindow("Fractals Lab"); glClearColor(1.0,1.0,1.0,0.0); glShadeModel(GL_SMOOTH); glutDisplayFunc(displayfunc); glutReshapeFunc(reshapefunc); glutMouseFunc(mousefunc); glutKeyboardFunc(keyfunc); glutMainLoop(); return 0; }
95f8b87f9f3fc60b675ff381c9beb8338e140a49.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/time.h> #include <GL/glut.h> #define W 800 #define H 600 int max_steps = 10; double left = -2.5; double right = 1.5; double top = 1.5; double bottom = -1.5; double get_time() { double t ; // struct timeval* ptr = (struct timeval*)malloc( sizeof(struct timeval) ) ; // gettimeofday( ptr , NULL ) ; // second argument is time zone... NULL // t = ptr->tv_sec * 1000000.0 + ptr->tv_usec ; // free( ptr ) ; // return t / 1000000.0 ; } typedef struct { double real; double im; } ImNum; __device__ ImNum next_num(ImNum z, double a, double b){ ImNum z_i = {pow(z.real, 2) - pow(z.im, 2) + a, 2*z.real*z.im + b}; return z_i; } __global__ void run_pixel(double a, double hscale, double bottom, int max_steps, float *red, float *green, float *blue){ int y = threadIdx.x; ImNum curr_z = {0, 0}; double b = bottom + (y * hscale); int steps = 0; for(int i = 0; i < max_steps; i++){ curr_z = next_num(curr_z, a, b); if(hypot(curr_z.real, curr_z.im) > 2){ break; } steps++; } if(steps != max_steps){ red[y] = (steps*1.0/max_steps); green[y] = 1-(steps*1.0/max_steps); blue[y] = (steps*1.0/max_steps); } } void displayfunc() { double hscale = fabs(top-bottom)/H; double wscale = fabs(right-left)/W; glClear(GL_COLOR_BUFFER_BIT); double start = get_time(); for(int x = 0; x < W; x++){ double a = left + (x * wscale); float *red, *green, *blue; float *red_device, *green_device, *blue_device; red = (float*)malloc(H*sizeof(float)); blue = (float*)malloc(H*sizeof(float)); green = (float*)malloc(H*sizeof(float)); cudaMalloc((void**)&red_device, sizeof(float)*H); cudaMalloc((void**)&blue_device, sizeof(float)*H); cudaMalloc((void**)&green_device, sizeof(float)*H); for(int i = 0; i < H; i++){ red[i] = 1; blue[i] = 1; green[i] = 1; } cudaMemcpy(red_device, red, sizeof(float)*H, cudaMemcpyHostToDevice); cudaMemcpy(blue_device, blue, sizeof(float)*H, cudaMemcpyHostToDevice); cudaMemcpy(green_device, green, sizeof(float)*H, 
cudaMemcpyHostToDevice); dim3 dimGrid(1), dimBlock(H); run_pixel<<<dimGrid,dimBlock>>>(a, hscale, bottom, max_steps, red_device, green_device, blue_device); cudaMemcpy(red, red_device, sizeof(float)*H, cudaMemcpyDeviceToHost); cudaMemcpy(blue, blue_device, sizeof(float)*H, cudaMemcpyDeviceToHost); cudaMemcpy(green, green_device, sizeof(float)*H, cudaMemcpyDeviceToHost); for(int y = 0; y < H; y++){ glColor3f(red[y], green[y], blue[y]); glBegin(GL_POINTS); glVertex2f(x,y); glEnd(); } free(red); free(blue); free(green); cudaFree(red_device); cudaFree(blue_device); cudaFree(green_device); } glutSwapBuffers(); double end = get_time(); printf("dx: %f\ndy: %f\n", fabs(right-left), fabs(top-bottom)); printf("max steps: %d\n", max_steps); printf("time: %f\n", end - start); printf("***********************************\n"); } void reshapefunc(int wscr,int hscr) { glViewport(0,0,(GLsizei)W,(GLsizei)H); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0.0,1.0*W,0.0,1.0*H); // always a square glMatrixMode(GL_MODELVIEW); } void mousefunc(int button,int state,int xscr,int yscr) { if(button == 3 && state == GLUT_DOWN){ max_steps *= 2; } if(button == 4 && state == GLUT_DOWN){ max_steps /= 2; } if(button == GLUT_LEFT_BUTTON && state == GLUT_DOWN){ double hscale = fabs(top-bottom)/H; double wscale = fabs(right-left)/W; double x_coord = left + (xscr * wscale); double y_coord = bottom + ((H-yscr) * hscale); double temp_left = x_coord - (fabs(right-left)/4); double temp_right = x_coord + (fabs(right-left)/4); left = temp_left; right = temp_right; double temp_bottom = y_coord - fabs(top-bottom)/4; double temp_top = y_coord + fabs(top-bottom)/4; bottom = temp_bottom; top = temp_top; } if(button == GLUT_RIGHT_BUTTON && state == GLUT_DOWN){ double hscale = fabs(top-bottom)/H; double wscale = fabs(right-left)/W; double x_coord = left + (xscr * wscale); double y_coord = bottom + ((H-yscr) * hscale); double temp_left = x_coord - (fabs(right-left)); double temp_right = x_coord + 
(fabs(right-left)); left = temp_left; right = temp_right; double temp_bottom = y_coord - fabs(top-bottom); double temp_top = y_coord + fabs(top-bottom); bottom = temp_bottom; top = temp_top; } glutPostRedisplay(); } void keyfunc(unsigned char key,int xscr,int yscr) { double hscale = fabs(top-bottom)/H; double wscale = fabs(right-left)/W; if(key == 'a'){ left -= (W/10)*wscale; right -= (W/10)*wscale; } if(key == 'd'){ left += (W/10)*wscale; right += (W/10)*wscale; } if(key == 'w'){ bottom += (H/10)*hscale; top += (H/10)*hscale; } if(key == 's'){ bottom -= (H/10)*hscale; top -= (H/10)*hscale; } glutPostRedisplay(); } int main(int argc,char* argv[]) { glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB); glutInitWindowSize(W, H); glutInitWindowPosition(100,50); glutCreateWindow("Fractals Lab"); glClearColor(1.0,1.0,1.0,0.0); glShadeModel(GL_SMOOTH); glutDisplayFunc(displayfunc); glutReshapeFunc(reshapefunc); glutMouseFunc(mousefunc); glutKeyboardFunc(keyfunc); glutMainLoop(); return 0; }
c2e9f2a04da2a740cb8afc17f2c7713d56fa0302.hip
// !!! This is a file automatically generated by hipify!!! //data-racer #include <hip/hip_runtime.h> #include <stdio.h> #define SIZE 2 #define TILES 4 #define LENGTH (TILES * SIZE) #define N 2 __global__ void matrix_transpose(float* A) { __shared__ float tile [SIZE][SIZE]; int x = threadIdx.x; int y = threadIdx.y; int tile_x = blockIdx.x; int tile_y = blockIdx.y; tile[x][y] = A[((x + (tile_x * SIZE)) * LENGTH) + (y + (tile_y * SIZE))]; tile[x][y] = tile[y][x]; __syncthreads(); A[((x + (tile_y * SIZE)) * LENGTH) + (y + (tile_x * SIZE))] = tile[x][y]; }
c2e9f2a04da2a740cb8afc17f2c7713d56fa0302.cu
//data-racer #include <cuda.h> #include <stdio.h> #define SIZE 2 #define TILES 4 #define LENGTH (TILES * SIZE) #define N 2 __global__ void matrix_transpose(float* A) { __shared__ float tile [SIZE][SIZE]; int x = threadIdx.x; int y = threadIdx.y; int tile_x = blockIdx.x; int tile_y = blockIdx.y; tile[x][y] = A[((x + (tile_x * SIZE)) * LENGTH) + (y + (tile_y * SIZE))]; tile[x][y] = tile[y][x]; __syncthreads(); A[((x + (tile_y * SIZE)) * LENGTH) + (y + (tile_x * SIZE))] = tile[x][y]; }
31d521ff05f930d69c481ac87bf833f6adf6c0e8.hip
// !!! This is a file automatically generated by hipify!!! #include <complex> #include <iostream> #include <vector> using namespace std; #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <math.h> #include <hip/hip_complex.h> #include <stdio.h> #include "longrange.h" #include "../core_cuda/spinsystem.hpp" #define JM_FORWARD 1 #define JM_BACKWARD 0 #define REAL float //#define CUCOMPLEX hipDoubleComplex #define CUCOMPLEX cuFloatComplex //#define MAKECOMPLEX(a,b) make_cuDoubleComplex(a,b) #define MAKECOMPLEX(a,b) make_cuFloatComplex(a,b) // #define SMART_SCHEDULE #ifdef SMART_SCHEDULE #define IDX_PATT(a, b) \ #else #define IDX_PATT(a, b) \ const int a = blockDim.x * blockIdx.x + threadIdx.x; \ const int b = blockDim.y * blockIdx.y + threadIdx.y; #endif #if 1 #define BOUND_CHECKS 1 #define KCHECK \ { \ const hipError_t i = hipGetLastError();\ if(i) \ {\ printf("(%s:%i) %s\n", __FILE__, __LINE__-1, hipGetErrorString(i));\ exit(-1);\ }\ } #define CHECKCALL(expression) \ { \ const hipError_t err = (expression); \ if(err != hipSuccess) \ printf("(%s:%i) (%i)%s\n", __FILE__, __LINE__, err, hipGetErrorString(err)); \ /* printf("(%s:%i) %s => %i\n", __FILE__, __LINE__, #expression, err); */ \ } #else #define KCHECK ; #define CHECKERR(e) ; #endif typedef struct fft_plan_phase { int depth; int num_plan; int dest_count; int* h_src; int* h_dest; float* h_W_angles; int* d_src; int* d_dest; float* d_W_angles; }fft_plan_phase; //mixed radix plan typedef struct fft_plan { fft_plan_phase* phase; int num_phases; int n; int refcount; } fft_plan; static fft_plan** fft_plans = 0; static int num_fft_plans; static int size_fft_plans; __global__ void getRPart32(const int N_x, const int N_y, REAL* d_dest, CUCOMPLEX* d_src) { IDX_PATT(i, y); if(i >= N_x || y >= N_y) return; const int idx = i + y * N_x; d_dest[idx] = d_src[idx].x; } __global__ void getIPart32(const int N_x, const int N_y, REAL* d_dest, CUCOMPLEX* d_src) { IDX_PATT(i, y); if(i >= N_x || y >= 
N_y) return; const int idx = i + y * N_x; d_dest[idx] = d_src[idx].y; } __global__ void scaleC32(const int N_x, const int N_y, CUCOMPLEX* d, CUCOMPLEX* s, float v) { IDX_PATT(x, y); if(x >= N_x || y >= N_y) return; const int idx = x + y * N_x; // d[idx] = cuCmulf(MAKECOMPLEX(v,0), s[idx]); d[idx].x = v * s[idx].x; d[idx].y = v * s[idx].y; } static void d_scaleC32(const int nx, const int ny, CUCOMPLEX* d_dest, CUCOMPLEX* d_src, float scale) { #ifdef SMART_SCHEDULE //different thread schedules for different access patterns dim3 blocks(nx); dim3 threads(ny); #else const int _blocksx = nx / 32 + 1; const int _blocksy = ny / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); #endif hipLaunchKernelGGL(( scaleC32), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_dest, d_src, scale); KCHECK; } __global__ void setC32(const int N_x, const int N_y, CUCOMPLEX* v, float R, float I) { IDX_PATT(x, y); if(x >= N_x || y >= N_y) return; const int idx = x + y * N_x; v[idx].x = R; v[idx].y = I; } typedef struct JM_LONGRANGE_PLAN { int N_x, N_y, N_z; fft_plan* plan_x; fft_plan* plan_y; REAL* d_output; CUCOMPLEX* h_temp; // 2D arrays, 1st dimmension is for layer CUCOMPLEX** d_sx_q; CUCOMPLEX** d_sy_q; CUCOMPLEX** d_sz_q; CUCOMPLEX* d_hA_q; // 2D arrays, 1st dimmension is for interlayer offset CUCOMPLEX** d_GammaXX; CUCOMPLEX** d_GammaXY; CUCOMPLEX** d_GammaXZ; CUCOMPLEX** d_GammaYY; CUCOMPLEX** d_GammaYZ; CUCOMPLEX** d_GammaZZ; }JM_LONGRANGE_PLAN; static void add_plan(fft_plan* p) { if(num_fft_plans == size_fft_plans) { size_fft_plans *= 2; fft_plans = (fft_plan**) realloc(fft_plans, sizeof(fft_plan) * size_fft_plans); } fft_plans[num_fft_plans] = p; num_fft_plans++; } static void free_plan(fft_plan* p) { p->refcount--; if(p->refcount == 0) { for(int i=0; i<num_fft_plans; i++) { if(fft_plans[i] == p) { fft_plans[i] = 0; } } for(int i=0; i<p->num_phases; i++) { CHECKCALL(hipFree(p->phase[i].d_src)); CHECKCALL(hipFree(p->phase[i].d_dest)); 
CHECKCALL(hipFree(p->phase[i].d_W_angles)); } delete [] p->phase; free(p); } } #define ll_call(in,out) \ if(lua_pcall(L, in, out, 0)) \ { \ fprintf(stderr, "%s\n", lua_tostring(L, -1)); \ lua_close(L); \ return 0; \ } static fft_plan* make_plan(int n) { if(fft_plans == 0) { fft_plans = (fft_plan**) malloc(sizeof(fft_plan) * 32); num_fft_plans = 0; size_fft_plans = 32; } for(int i=0; i<num_fft_plans; i++) { if(fft_plans[i] && fft_plans[i]->n == n) { fft_plans[i]->refcount++; return fft_plans[i]; } } lua_State* L = lua_open(); luaL_openlibs(L); lua_newtable(L); for(int i=1; i<=n; i++) { lua_pushinteger(L, i); lua_pushinteger(L, i); lua_settable(L, -3); } lua_setglobal(L, "indices"); if(luaL_dostring(L, __longrange)) { fprintf(stderr, "%s\n", lua_tostring(L, -1)); lua_close(L); return 0; } fft_plan* plan = new fft_plan; plan->n = n; lua_getglobal(L, "max_depth"); int max_depth = lua_tointeger(L, -1); lua_pop(L, 1); plan->phase = new fft_plan_phase[max_depth]; plan->num_phases = max_depth; // printf("max_depth %i\n", max_depth); for(int i=0; i<max_depth; i++) { plan->phase[i].depth = i; lua_getglobal(L, "get_num_plan"); lua_pushinteger(L, i+1); ll_call(1,1); plan->phase[i].num_plan = lua_tointeger(L, -1); lua_pop(L, lua_gettop(L)); lua_getglobal(L, "get_dest_count"); lua_pushinteger(L, i+1); ll_call(1,1); plan->phase[i].dest_count = lua_tointeger(L, -1); lua_pop(L, lua_gettop(L)); const int np = plan->phase[i].num_plan; const int dc = plan->phase[i].dest_count; const int sz_i = dc * np * sizeof(int); const int sz_d = dc * np * sizeof(float); plan->phase[i].h_src = (int* )malloc(sz_i); plan->phase[i].h_dest = (int* )malloc(sz_i); plan->phase[i].h_W_angles = (float*)malloc(sz_d); CHECKCALL(malloc_device(&(plan->phase[i].d_src), sz_i)); CHECKCALL(malloc_device(&(plan->phase[i].d_dest), sz_i)); CHECKCALL(malloc_device(&(plan->phase[i].d_W_angles), sz_d)); int* src = plan->phase[i].h_src; int* dst = plan->phase[i].h_dest; float* wan = plan->phase[i].h_W_angles; // 
printf("np: %i\n", np); for(int j=0; j<np; j++) { lua_getglobal(L, "get_plan"); lua_pushinteger(L, i+1); lua_pushinteger(L, j+1); ll_call(2,3*dc); for(int q=0; q<dc; q++) { src[j*dc+q] = lua_tointeger(L, q+1)-1; // printf("%i ", src[j*dc+q]); } for(int q=0; q<dc; q++) { wan[j*dc+q] = -2.0*3.14159265358979* lua_tonumber(L, q+dc+1); // printf("%g ", wan[j*dc+q]); } for(int q=0; q<dc; q++) { dst[j*dc+q] = lua_tointeger(L, q+dc*2+1)-1; // printf("%i ", dst[j*dc+q]); } // printf("\n"); lua_pop(L, lua_gettop(L)); } // move plan over to device and delete it here CHECKCALL(hipMemcpy( plan->phase[i].d_src, plan->phase[i].h_src, sz_i, hipMemcpyHostToDevice)); CHECKCALL(hipMemcpy( plan->phase[i].d_dest, plan->phase[i].h_dest, sz_i, hipMemcpyHostToDevice)); CHECKCALL(hipMemcpy( plan->phase[i].d_W_angles, plan->phase[i].h_W_angles, sz_d, hipMemcpyHostToDevice)); free(plan->phase[i].h_src); free(plan->phase[i].h_dest); free(plan->phase[i].h_W_angles); } add_plan(plan); return make_plan(n); } __global__ void __r2c(const int nx, const int ny, CUCOMPLEX* d_dest, const REAL* d_src) { IDX_PATT(x, y); if(x >= nx || y >= ny) return; d_dest[x+y*nx] = MAKECOMPLEX(d_src[x+y*nx], 0); } static void d_r2c(const int nx, const int ny, CUCOMPLEX* d_dest, const REAL* d_src) { #ifdef SMART_SCHEDULE //different thread schedules for different access patterns dim3 blocks(nx); dim3 threads(ny); #else const int _blocksx = nx / 32 + 1; const int _blocksy = ny / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); #endif // printf("%i %i %p %p\n", nx, ny, d_dest, d_src); hipLaunchKernelGGL(( __r2c), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_dest, d_src); KCHECK; } __device__ int d_sizeof_s_fft_iteration(int R) { return sizeof(float) * sizeof(int) * (R+1); } template <int direction, int dest_count> __global__ void Fourier_2D_x(const int nx, const int ny, int* d_src, int* d_dest, float* d_W_angles, CUCOMPLEX* dest, CUCOMPLEX* src) { const int base = threadIdx.x * dest_count; const int y = 
blockIdx.x; float Wreal, Wimag; CUCOMPLEX res; // fetch sources CUCOMPLEX s[dest_count]; // CUCOMPLEX d[dest_count]; #pragma unroll for(int i=0; i<dest_count; i++) { s[i] = src[y*nx+d_src[base+i]]; } for(int i=0; i<dest_count; i++) { if(direction == 1) //forward sincos( d_W_angles[base+i] , &Wimag, &Wreal); else //backward sincos(-d_W_angles[base+i] , &Wimag, &Wreal); CUCOMPLEX W = MAKECOMPLEX(Wreal, Wimag); CUCOMPLEX Wi = W; res = cuCaddf(s[0], cuCmulf(W, s[1])); #pragma unroll for(int j=2; j<dest_count; j++) { Wi = cuCmulf(Wi, W); res = cuCaddf(res, cuCmulf(Wi, s[j])); } dest[y*nx+d_dest[base+i]] = res; // cache dest // d[i] = res; } // #pragma unroll // for(int i=0; i<dest_count; i++) // { // // write to dest // dest[y*nx+d_dest[base+i]] = d[i]; // } } // #define TESTING template<int direction> static void fourier2D_x_element( fft_plan* plan, int nx, const int ny, CUCOMPLEX* d_dest, CUCOMPLEX* d_src) { dim3 blocks(ny); #ifdef TESTING CUCOMPLEX* h_data; CHECKCALL(malloc_host(&h_data, sizeof(CUCOMPLEX) * nx*ny)); for(int i=0; i<ny; i++) { h_data[i*nx+0] = MAKECOMPLEX(1,0); h_data[i*nx+1] = MAKECOMPLEX(2,0); h_data[i*nx+2] = MAKECOMPLEX(1,0); h_data[i*nx+3] = MAKECOMPLEX(1,0); h_data[i*nx+4] = MAKECOMPLEX(1,0); h_data[i*nx+5] = MAKECOMPLEX(3,0); h_data[i*nx+6] = MAKECOMPLEX(3,0); h_data[i*nx+7] = MAKECOMPLEX(4,0); } //h_data[8] = MAKECOMPLEX(5,0); CHECKCALL(hipMemcpy(d_src, h_data, sizeof(CUCOMPLEX)*nx*ny, hipMemcpyHostToDevice)); #endif CUCOMPLEX* T; for(int phase=plan->num_phases-1; phase>=0; phase--) // int phase=plan->num_phases-1; { struct fft_plan_phase& p = plan->phase[phase]; dim3 threads(p.num_plan); // template <int direction, int dest_count> // __global__ void Fourier_2D_x(const int nx, const int ny, // int* d_src, int* d_dest, float* d_W_angles, // CUCOMPLEX* dest, CUCOMPLEX* src) #define FFF(dc) case dc: \ hipLaunchKernelGGL(( Fourier_2D_x<direction,dc>), dim3(blocks), dim3(threads), 0, 0, nx, ny, \ p.d_src, p.d_dest, p.d_W_angles, d_dest, d_src); \ 
break switch(p.dest_count) { FFF( 2); FFF( 3); FFF( 4); FFF( 5); FFF( 6); FFF( 7); FFF( 8); FFF( 9); FFF(10); FFF(11); FFF(12); FFF(13); FFF(14); FFF(15); FFF(16); FFF(17); default: fprintf(stderr, "Spurious dest_count[%i] (%s:%i)\n", p.dest_count, __FILE__, __LINE__); } KCHECK; #undef FFF T = d_src; d_src = d_dest; d_dest = T; } #ifdef TESTING printf("Goal:\n"); printf(" 16.0000 + 0.0000i\n"); printf(" 1.4142 + 4.8284i\n"); printf(" -2.0000 + 0.0000i\n"); printf(" -1.4142 + 0.8284i\n"); printf(" -4.0000 + 0.0000i\n"); printf(" -1.4142 - 0.8284i\n"); printf(" -2.0000 - 0.0000i\n"); printf(" 1.4142 - 4.8284i\n"); // printf(" 9.0000 + 0.0000i\n"); // printf(" 1.5000 + 0.8660i\n"); // printf(" -1.5000 + 0.8660i\n"); // printf(" -3.0000 + 0.0000i\n"); // printf(" -1.5000 - 0.8660i\n"); // printf(" 1.5000 - 0.8660i\n"); for(int q=1; q>=0; q--) { if(q == 0) { CHECKCALL(hipMemcpy(h_data, d_dest, sizeof(CUCOMPLEX)*nx*ny, hipMemcpyDeviceToHost)); } else { CHECKCALL(hipMemcpy(h_data, d_src, sizeof(CUCOMPLEX)*nx*ny, hipMemcpyDeviceToHost)); } printf("\nres (%i):\n", q); for(int i=0; i<nx; i++) { printf("%i) % -.4f %s % -.4fi\n", i, h_data[nx+i].x, h_data[nx+i].y<0?"-":"+", fabs(h_data[nx+i].y)); } } exit(-1); #endif } // template<int direction> // static void fourier2D_x( // fft_plan* plan, int nx, const int ny, // CUCOMPLEX* d_dest, CUCOMPLEX* d_src) // { // // // #ifdef TESTING // // CUCOMPLEX* h_data; // CHECKCALL(malloc_host(&h_data, sizeof(CUCOMPLEX) * nx)); // // h_data[0] = MAKECOMPLEX(1,0); // h_data[1] = MAKECOMPLEX(2,0); // h_data[2] = MAKECOMPLEX(1,0); // h_data[3] = MAKECOMPLEX(1,0); // h_data[4] = MAKECOMPLEX(1,0); // h_data[5] = MAKECOMPLEX(3,0); // //h_data[6] = MAKECOMPLEX(3,0); // //h_data[7] = MAKECOMPLEX(4,0); // //h_data[8] = MAKECOMPLEX(5,0); // // CHECKCALL(hipMemcpy(d_src, h_data, sizeof(CUCOMPLEX)*nx, hipMemcpyHostToDevice)); // // printf("Passes: %i\n", passes); // #endif // // fourier2D_x_element(plan, nx, ny, d_dest, d_src); // fft now in d_src // 
// // // Fourier_2D_x<direction><<<blocks, threads>>>(nx, ny, plan->d_p, plan->d_radix, nradix, d_dest, d_src, passes); // /* // if(!(nradix & 0x1)) //then even number of operations, need to copy sol'n to d_dest // { // CHECKCALL(hipMemcpy(d_dest, d_src, sizeof(CUCOMPLEX)*nx*ny, hipMemcpyDeviceToDevice)); // }*/ // // printf("fourier2D (%s;%i)\n", __FILE__, __LINE__); // // #ifdef TESTING // // printf("Goal:\n"); // printf(" 9.0000 + 0.0000i\n"); // printf(" 1.5000 + 0.8660i\n"); // printf(" -1.5000 + 0.8660i\n"); // printf(" -3.0000 + 0.0000i\n"); // printf(" -1.5000 - 0.8660i\n"); // printf(" 1.5000 - 0.8660i\n"); // // for(int q=0; q<2; q++) // { // if(q == 0) // { // CHECKCALL(hipMemcpy(h_data, d_dest, sizeof(CUCOMPLEX)*nx, hipMemcpyDeviceToHost)); // } // else // { // CHECKCALL(hipMemcpy(h_data, d_src, sizeof(CUCOMPLEX)*nx, hipMemcpyDeviceToHost)); // } // // printf("\nres (%i):\n", q); // for(int i=0; i<nx; i++) // { // printf("%i) %g%+gi\n", i, h_data[i].x, h_data[i].y); // } // } // exit(-1); // #endif // } __global__ void transposeSimple(int nx, int ny, CUCOMPLEX *d_dest, CUCOMPLEX *d_src) { IDX_PATT(x, y); if(x >= nx || y >= ny) return; d_dest[x*ny + y] = d_src[y*nx + x]; } template<int direction> static void fourier2D_Transposed( fft_plan* planx, fft_plan* plany, const int nx, const int ny, CUCOMPLEX* d_dest, CUCOMPLEX* d_src) { const int npx = planx->num_phases; const int npy = plany->num_phases; fourier2D_x_element<direction>(planx, nx, ny, d_dest, d_src); //if npx is odd then results are in d_dest #ifdef SMART_SCHEDULE //different thread schedules for different access patterns dim3 blocks(nx); dim3 threads(ny); #else const int _blocksx = nx / 32 + 1; const int _blocksy = ny / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); #endif if(npx % 2) //then odd, fft res in d_dest { hipLaunchKernelGGL(( transposeSimple), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_src, d_dest); fourier2D_x_element<direction>(plany, ny, nx, d_dest, d_src); } else 
{ hipLaunchKernelGGL(( transposeSimple), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_dest, d_src); fourier2D_x_element<direction>(plany, ny, nx, d_src, d_dest); } if((npx + npy) % 2) { CHECKCALL(hipMemcpy(d_dest, d_src, sizeof(CUCOMPLEX)*nx*ny, hipMemcpyDeviceToDevice)); } } int JM_LONGRANGE_PLAN_ws_size(int nx, int ny, int /*nz*/) { return sizeof(CUCOMPLEX) * nx*ny; } JM_LONGRANGE_PLAN* make_JM_LONGRANGE_PLAN(int N_x, int N_y, int N_z, double* GammaXX, double* GammaXY, double* GammaXZ, double* GammaYY, double* GammaYZ, double* GammaZZ, void* ws_d_A, void* ws_d_B) { const int nz = N_z; const int nxy = N_x * N_y; const int sRxy = sizeof(REAL) * nxy; const int sCxy = sizeof(CUCOMPLEX) * nxy; JM_LONGRANGE_PLAN* p = new JM_LONGRANGE_PLAN; p->N_x = N_x; p->N_y = N_y; p->N_z = N_z; p->plan_x = make_plan(N_x); p->plan_y = make_plan(N_y); // temporary workspaces (host) CHECKCALL(malloc_host(&(p->h_temp), sCxy)); CUCOMPLEX* d_A = (CUCOMPLEX*)ws_d_A; CUCOMPLEX* d_B = (CUCOMPLEX*)ws_d_B; // 2D arrays, 1st dimmension is for layer p->d_sx_q = new CUCOMPLEX*[nz]; p->d_sy_q = new CUCOMPLEX*[nz]; p->d_sz_q = new CUCOMPLEX*[nz]; for(int i=0; i<nz; i++) { CHECKCALL(malloc_device(&(p->d_sx_q[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_sy_q[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_sz_q[i]), sCxy)); } CHECKCALL(malloc_device(&(p->d_hA_q), sCxy)); // make room for FT'd interaction matrices p->d_GammaXX = new CUCOMPLEX*[nz]; p->d_GammaXY = new CUCOMPLEX*[nz]; p->d_GammaXZ = new CUCOMPLEX*[nz]; p->d_GammaYY = new CUCOMPLEX*[nz]; p->d_GammaYZ = new CUCOMPLEX*[nz]; p->d_GammaZZ = new CUCOMPLEX*[nz]; for(int i=0; i<nz; i++) { CHECKCALL(malloc_device(&(p->d_GammaXX[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaXY[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaXZ[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaYY[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaYZ[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaZZ[i]), sCxy)); } CHECKCALL(malloc_device(&(p->d_output),sRxy)); // now 
we will work on loading all the interaction matrices // onto the GPU and fourier transforming them struct { double* h; //host memory CUCOMPLEX** d; //device memory } sd[] = { //sd = static data {GammaXX, p->d_GammaXX}, {GammaXY, p->d_GammaXY}, {GammaXZ, p->d_GammaXZ}, {GammaYY, p->d_GammaYY}, {GammaYZ, p->d_GammaYZ}, {GammaZZ, p->d_GammaZZ}, {0,0} }; for(int k=0; k<6; k++) //XX XY XZ YY YZ ZZ { for(int j=0; j<nz; j++) { for(int c=0; c<nxy; c++) { p->h_temp[c] = MAKECOMPLEX(sd[k].h[j*nxy + c], 0); } CHECKCALL(hipMemcpy(d_A, p->h_temp, sizeof(CUCOMPLEX)*nxy, hipMemcpyHostToDevice)); fourier2D_Transposed<1>(p->plan_x, p->plan_y, N_x, N_y, d_B, d_A); KCHECK; // going to prescale the data into d_GammaAB: d_scaleC32(N_x, N_y, sd[k].d[j], d_B, 1.0/((float)(nxy))); // d_scaleC(N_x, N_y, sd[k].d[j], p->d_B, 1.0); } } return p; } void free_JM_LONGRANGE_PLAN(JM_LONGRANGE_PLAN* p) { const int N_z = p->N_z; const int nz = N_z; // * 2 - 1; CHECKCALL(hipFree(p->d_output)); CHECKCALL(hipHostFree(p->h_temp)); for(int z=0; z<N_z; z++) { CHECKCALL(hipFree(p->d_sx_q[z])); CHECKCALL(hipFree(p->d_sy_q[z])); CHECKCALL(hipFree(p->d_sz_q[z])); } CHECKCALL(hipFree(p->d_hA_q)); delete [] p->d_sx_q; delete [] p->d_sy_q; delete [] p->d_sz_q; for(int z=0; z<nz; z++) { CHECKCALL(hipFree(p->d_GammaXX[z])); CHECKCALL(hipFree(p->d_GammaXY[z])); CHECKCALL(hipFree(p->d_GammaXZ[z])); CHECKCALL(hipFree(p->d_GammaYY[z])); CHECKCALL(hipFree(p->d_GammaYZ[z])); CHECKCALL(hipFree(p->d_GammaZZ[z])); } delete [] p->d_GammaXX; delete [] p->d_GammaXY; delete [] p->d_GammaXZ; delete [] p->d_GammaYY; delete [] p->d_GammaYZ; delete [] p->d_GammaZZ; free_plan(p->plan_x); free_plan(p->plan_y); delete p; } __global__ void convolveSum(const int N_x, const int N_y, CUCOMPLEX* d_dest, CUCOMPLEX* d_A, CUCOMPLEX* d_B, float sign) { IDX_PATT(i, y); if(i >= N_x || y >= N_y) return; const int idx = i + y * N_x; #ifdef BOUND_CHECKS if(idx >= N_x * N_y) return; #endif d_dest[idx] = cuCaddf(d_dest[idx], cuCmulf(d_A[idx], 
cuCmulf(MAKECOMPLEX(sign,0), d_B[idx]))); } __global__ void getLayer32(const int nx, const int ny, const int layer, REAL* d_dest, const REAL* d_src) { IDX_PATT(row, col); if(row >= nx || col >= ny) return; const int _a = col + row*nx; const int _b = _a + layer*nx*ny; d_dest[_a] = d_src[_b]; } __global__ void setLayer32(const int N_x, const int N_y, const int layer, REAL* d_dest, REAL* d_src) { IDX_PATT(i, y); if(i >= N_x || y >= N_y) return; const int _b = i + y * N_x; const int _a = _b + layer * N_x * N_y; #ifdef BOUND_CHECKS if(_b >= N_x * N_y) return; #endif d_dest[_a] = d_src[_b]; } void JM_LONGRANGE(JM_LONGRANGE_PLAN* p, const float* d_sx, const float* d_sy, const float* d_sz, float* d_hx, float* d_hy, float* d_hz, void* ws_d_A, void* ws_d_B) { const int N_x = p->N_x; const int N_y = p->N_y; const int N_z = p->N_z; #ifdef SMART_SCHEDULE //different thread schedules for different access patterns dim3 blocks(N_x); dim3 threads(N_y); #else const int _blocksx = N_x / 32 + 1; const int _blocksy = N_y / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); #endif CUCOMPLEX* d_A = (CUCOMPLEX*)ws_d_A; CUCOMPLEX* d_B = (CUCOMPLEX*)ws_d_B; CUCOMPLEX* d_src = d_A; //local vars for swapping workspace CUCOMPLEX* d_dest = d_B; // FT the spins struct { const float* d_s_r; CUCOMPLEX** d_s_q; float* d_h_r; } sd[] = { //sd = static data {d_sx, p->d_sx_q, d_hx}, {d_sy, p->d_sy_q, d_hy}, {d_sz, p->d_sz_q, d_hz} }; for(int k=0; k<3; k++) // x y z { const float* d_s_r = sd[k].d_s_r; for(int z=0; z<N_z; z++) { d_src = d_A; d_dest = d_B; //destination CUCOMPLEX* d_s_q = sd[k].d_s_q[z]; d_r2c(N_x, N_y, d_dest, &(d_s_r[z*N_x*N_y])); fourier2D_Transposed<1>(p->plan_x, p->plan_y, N_x, N_y, d_s_q, d_dest); // fourier2D(N_x, N_y, p->Rx, p->Ry, // p->d_exp2pi_x_f, p->d_exp2pi_y_f, // p->d_base_x, p->d_base_y, // p->d_step_x, p->d_step_y, // d_s_q, d_dest); } } // OK! Now we have all the spins FT'd and the interaction matrix ready. 
// We will now convolve the signals into hq // Nov 9/2011. Negative offsets are the same as positive offsets except tensors with odd number // of Zs are negated (XZ, YZ, not ZZ) for(int targetLayer=0; targetLayer<N_z; targetLayer++) { for(int c=0; c<3; c++) //c = 0,1,2: X,Y,Z { hipLaunchKernelGGL(( setC32), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, 0, 0); KCHECK; for(int sourceLayer=0; sourceLayer<N_z; sourceLayer++) { //const int offset = (sourceLayer - targetLayer + N_z - 1); int offset = sourceLayer - targetLayer; float sign = 1; if(offset < 0) { offset = -offset; sign = -1; } switch(c) { case 0: hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sx_q[sourceLayer], p->d_GammaXX[offset], 1); hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sy_q[sourceLayer], p->d_GammaXY[offset], 1); hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sz_q[sourceLayer], p->d_GammaXZ[offset], sign); break; case 1: hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sx_q[sourceLayer], p->d_GammaXY[offset], 1); hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sy_q[sourceLayer], p->d_GammaYY[offset], 1); hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sz_q[sourceLayer], p->d_GammaYZ[offset], sign); break; case 2: hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sx_q[sourceLayer], p->d_GammaXZ[offset], sign); hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sy_q[sourceLayer], p->d_GammaYZ[offset], sign); hipLaunchKernelGGL(( convolveSum), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_hA_q, p->d_sz_q[sourceLayer], p->d_GammaZZ[offset], 1); } KCHECK } // h(q) now calculated, iFT it float* d_hxyz = 
sd[c].d_h_r; // this is where the result will go d_src = d_A; d_dest = d_B; fourier2D_Transposed<-1>(p->plan_y, p->plan_x, N_y, N_x, d_src, p->d_hA_q); // fourier2D(N_x, N_y, p->Rx, p->Ry, // p->d_exp2pi_x_b, p->d_exp2pi_y_b, // p->d_base_x, p->d_base_y, // p->d_step_x, p->d_step_y, // d_src, p->d_hA_q); // //real space of iFFT in d_src, need to chop off the (hopefully) zero imag part hipLaunchKernelGGL(( getRPart32), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, p->d_output, d_src); KCHECK; hipLaunchKernelGGL(( setLayer32), dim3(blocks), dim3(threads), 0, 0, N_x, N_y, targetLayer, d_hxyz, p->d_output); KCHECK; } } //holy crap, we're done. }
31d521ff05f930d69c481ac87bf833f6adf6c0e8.cu
#include <complex> #include <iostream> #include <vector> using namespace std; #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #include <math.h> #include <cuComplex.h> #include <stdio.h> #include "longrange.h" #include "../core_cuda/spinsystem.hpp" #define JM_FORWARD 1 #define JM_BACKWARD 0 #define REAL float //#define CUCOMPLEX cuDoubleComplex #define CUCOMPLEX cuFloatComplex //#define MAKECOMPLEX(a,b) make_cuDoubleComplex(a,b) #define MAKECOMPLEX(a,b) make_cuFloatComplex(a,b) // #define SMART_SCHEDULE #ifdef SMART_SCHEDULE #define IDX_PATT(a, b) \ #else #define IDX_PATT(a, b) \ const int a = blockDim.x * blockIdx.x + threadIdx.x; \ const int b = blockDim.y * blockIdx.y + threadIdx.y; #endif #if 1 #define BOUND_CHECKS 1 #define KCHECK \ { \ const cudaError_t i = cudaGetLastError();\ if(i) \ {\ printf("(%s:%i) %s\n", __FILE__, __LINE__-1, cudaGetErrorString(i));\ exit(-1);\ }\ } #define CHECKCALL(expression) \ { \ const cudaError_t err = (expression); \ if(err != cudaSuccess) \ printf("(%s:%i) (%i)%s\n", __FILE__, __LINE__, err, cudaGetErrorString(err)); \ /* printf("(%s:%i) %s => %i\n", __FILE__, __LINE__, #expression, err); */ \ } #else #define KCHECK ; #define CHECKERR(e) ; #endif typedef struct fft_plan_phase { int depth; int num_plan; int dest_count; int* h_src; int* h_dest; float* h_W_angles; int* d_src; int* d_dest; float* d_W_angles; }fft_plan_phase; //mixed radix plan typedef struct fft_plan { fft_plan_phase* phase; int num_phases; int n; int refcount; } fft_plan; static fft_plan** fft_plans = 0; static int num_fft_plans; static int size_fft_plans; __global__ void getRPart32(const int N_x, const int N_y, REAL* d_dest, CUCOMPLEX* d_src) { IDX_PATT(i, y); if(i >= N_x || y >= N_y) return; const int idx = i + y * N_x; d_dest[idx] = d_src[idx].x; } __global__ void getIPart32(const int N_x, const int N_y, REAL* d_dest, CUCOMPLEX* d_src) { IDX_PATT(i, y); if(i >= N_x || y >= N_y) return; const int idx = i + y * N_x; d_dest[idx] = d_src[idx].y; } 
__global__ void scaleC32(const int N_x, const int N_y, CUCOMPLEX* d, CUCOMPLEX* s, float v) { IDX_PATT(x, y); if(x >= N_x || y >= N_y) return; const int idx = x + y * N_x; // d[idx] = cuCmulf(MAKECOMPLEX(v,0), s[idx]); d[idx].x = v * s[idx].x; d[idx].y = v * s[idx].y; } static void d_scaleC32(const int nx, const int ny, CUCOMPLEX* d_dest, CUCOMPLEX* d_src, float scale) { #ifdef SMART_SCHEDULE //different thread schedules for different access patterns dim3 blocks(nx); dim3 threads(ny); #else const int _blocksx = nx / 32 + 1; const int _blocksy = ny / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); #endif scaleC32<<<blocks, threads>>>(nx, ny, d_dest, d_src, scale); KCHECK; } __global__ void setC32(const int N_x, const int N_y, CUCOMPLEX* v, float R, float I) { IDX_PATT(x, y); if(x >= N_x || y >= N_y) return; const int idx = x + y * N_x; v[idx].x = R; v[idx].y = I; } typedef struct JM_LONGRANGE_PLAN { int N_x, N_y, N_z; fft_plan* plan_x; fft_plan* plan_y; REAL* d_output; CUCOMPLEX* h_temp; // 2D arrays, 1st dimmension is for layer CUCOMPLEX** d_sx_q; CUCOMPLEX** d_sy_q; CUCOMPLEX** d_sz_q; CUCOMPLEX* d_hA_q; // 2D arrays, 1st dimmension is for interlayer offset CUCOMPLEX** d_GammaXX; CUCOMPLEX** d_GammaXY; CUCOMPLEX** d_GammaXZ; CUCOMPLEX** d_GammaYY; CUCOMPLEX** d_GammaYZ; CUCOMPLEX** d_GammaZZ; }JM_LONGRANGE_PLAN; static void add_plan(fft_plan* p) { if(num_fft_plans == size_fft_plans) { size_fft_plans *= 2; fft_plans = (fft_plan**) realloc(fft_plans, sizeof(fft_plan) * size_fft_plans); } fft_plans[num_fft_plans] = p; num_fft_plans++; } static void free_plan(fft_plan* p) { p->refcount--; if(p->refcount == 0) { for(int i=0; i<num_fft_plans; i++) { if(fft_plans[i] == p) { fft_plans[i] = 0; } } for(int i=0; i<p->num_phases; i++) { CHECKCALL(cudaFree(p->phase[i].d_src)); CHECKCALL(cudaFree(p->phase[i].d_dest)); CHECKCALL(cudaFree(p->phase[i].d_W_angles)); } delete [] p->phase; free(p); } } #define ll_call(in,out) \ if(lua_pcall(L, in, out, 0)) \ { \ 
fprintf(stderr, "%s\n", lua_tostring(L, -1)); \ lua_close(L); \ return 0; \ } static fft_plan* make_plan(int n) { if(fft_plans == 0) { fft_plans = (fft_plan**) malloc(sizeof(fft_plan) * 32); num_fft_plans = 0; size_fft_plans = 32; } for(int i=0; i<num_fft_plans; i++) { if(fft_plans[i] && fft_plans[i]->n == n) { fft_plans[i]->refcount++; return fft_plans[i]; } } lua_State* L = lua_open(); luaL_openlibs(L); lua_newtable(L); for(int i=1; i<=n; i++) { lua_pushinteger(L, i); lua_pushinteger(L, i); lua_settable(L, -3); } lua_setglobal(L, "indices"); if(luaL_dostring(L, __longrange)) { fprintf(stderr, "%s\n", lua_tostring(L, -1)); lua_close(L); return 0; } fft_plan* plan = new fft_plan; plan->n = n; lua_getglobal(L, "max_depth"); int max_depth = lua_tointeger(L, -1); lua_pop(L, 1); plan->phase = new fft_plan_phase[max_depth]; plan->num_phases = max_depth; // printf("max_depth %i\n", max_depth); for(int i=0; i<max_depth; i++) { plan->phase[i].depth = i; lua_getglobal(L, "get_num_plan"); lua_pushinteger(L, i+1); ll_call(1,1); plan->phase[i].num_plan = lua_tointeger(L, -1); lua_pop(L, lua_gettop(L)); lua_getglobal(L, "get_dest_count"); lua_pushinteger(L, i+1); ll_call(1,1); plan->phase[i].dest_count = lua_tointeger(L, -1); lua_pop(L, lua_gettop(L)); const int np = plan->phase[i].num_plan; const int dc = plan->phase[i].dest_count; const int sz_i = dc * np * sizeof(int); const int sz_d = dc * np * sizeof(float); plan->phase[i].h_src = (int* )malloc(sz_i); plan->phase[i].h_dest = (int* )malloc(sz_i); plan->phase[i].h_W_angles = (float*)malloc(sz_d); CHECKCALL(malloc_device(&(plan->phase[i].d_src), sz_i)); CHECKCALL(malloc_device(&(plan->phase[i].d_dest), sz_i)); CHECKCALL(malloc_device(&(plan->phase[i].d_W_angles), sz_d)); int* src = plan->phase[i].h_src; int* dst = plan->phase[i].h_dest; float* wan = plan->phase[i].h_W_angles; // printf("np: %i\n", np); for(int j=0; j<np; j++) { lua_getglobal(L, "get_plan"); lua_pushinteger(L, i+1); lua_pushinteger(L, j+1); ll_call(2,3*dc); 
for(int q=0; q<dc; q++) { src[j*dc+q] = lua_tointeger(L, q+1)-1; // printf("%i ", src[j*dc+q]); } for(int q=0; q<dc; q++) { wan[j*dc+q] = -2.0*3.14159265358979* lua_tonumber(L, q+dc+1); // printf("%g ", wan[j*dc+q]); } for(int q=0; q<dc; q++) { dst[j*dc+q] = lua_tointeger(L, q+dc*2+1)-1; // printf("%i ", dst[j*dc+q]); } // printf("\n"); lua_pop(L, lua_gettop(L)); } // move plan over to device and delete it here CHECKCALL(cudaMemcpy( plan->phase[i].d_src, plan->phase[i].h_src, sz_i, cudaMemcpyHostToDevice)); CHECKCALL(cudaMemcpy( plan->phase[i].d_dest, plan->phase[i].h_dest, sz_i, cudaMemcpyHostToDevice)); CHECKCALL(cudaMemcpy( plan->phase[i].d_W_angles, plan->phase[i].h_W_angles, sz_d, cudaMemcpyHostToDevice)); free(plan->phase[i].h_src); free(plan->phase[i].h_dest); free(plan->phase[i].h_W_angles); } add_plan(plan); return make_plan(n); } __global__ void __r2c(const int nx, const int ny, CUCOMPLEX* d_dest, const REAL* d_src) { IDX_PATT(x, y); if(x >= nx || y >= ny) return; d_dest[x+y*nx] = MAKECOMPLEX(d_src[x+y*nx], 0); } static void d_r2c(const int nx, const int ny, CUCOMPLEX* d_dest, const REAL* d_src) { #ifdef SMART_SCHEDULE //different thread schedules for different access patterns dim3 blocks(nx); dim3 threads(ny); #else const int _blocksx = nx / 32 + 1; const int _blocksy = ny / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); #endif // printf("%i %i %p %p\n", nx, ny, d_dest, d_src); __r2c<<<blocks, threads>>>(nx, ny, d_dest, d_src); KCHECK; } __device__ int d_sizeof_s_fft_iteration(int R) { return sizeof(float) * sizeof(int) * (R+1); } template <int direction, int dest_count> __global__ void Fourier_2D_x(const int nx, const int ny, int* d_src, int* d_dest, float* d_W_angles, CUCOMPLEX* dest, CUCOMPLEX* src) { const int base = threadIdx.x * dest_count; const int y = blockIdx.x; float Wreal, Wimag; CUCOMPLEX res; // fetch sources CUCOMPLEX s[dest_count]; // CUCOMPLEX d[dest_count]; #pragma unroll for(int i=0; i<dest_count; i++) { s[i] = 
src[y*nx+d_src[base+i]]; } for(int i=0; i<dest_count; i++) { if(direction == 1) //forward sincos( d_W_angles[base+i] , &Wimag, &Wreal); else //backward sincos(-d_W_angles[base+i] , &Wimag, &Wreal); CUCOMPLEX W = MAKECOMPLEX(Wreal, Wimag); CUCOMPLEX Wi = W; res = cuCaddf(s[0], cuCmulf(W, s[1])); #pragma unroll for(int j=2; j<dest_count; j++) { Wi = cuCmulf(Wi, W); res = cuCaddf(res, cuCmulf(Wi, s[j])); } dest[y*nx+d_dest[base+i]] = res; // cache dest // d[i] = res; } // #pragma unroll // for(int i=0; i<dest_count; i++) // { // // write to dest // dest[y*nx+d_dest[base+i]] = d[i]; // } } // #define TESTING template<int direction> static void fourier2D_x_element( fft_plan* plan, int nx, const int ny, CUCOMPLEX* d_dest, CUCOMPLEX* d_src) { dim3 blocks(ny); #ifdef TESTING CUCOMPLEX* h_data; CHECKCALL(malloc_host(&h_data, sizeof(CUCOMPLEX) * nx*ny)); for(int i=0; i<ny; i++) { h_data[i*nx+0] = MAKECOMPLEX(1,0); h_data[i*nx+1] = MAKECOMPLEX(2,0); h_data[i*nx+2] = MAKECOMPLEX(1,0); h_data[i*nx+3] = MAKECOMPLEX(1,0); h_data[i*nx+4] = MAKECOMPLEX(1,0); h_data[i*nx+5] = MAKECOMPLEX(3,0); h_data[i*nx+6] = MAKECOMPLEX(3,0); h_data[i*nx+7] = MAKECOMPLEX(4,0); } //h_data[8] = MAKECOMPLEX(5,0); CHECKCALL(cudaMemcpy(d_src, h_data, sizeof(CUCOMPLEX)*nx*ny, cudaMemcpyHostToDevice)); #endif CUCOMPLEX* T; for(int phase=plan->num_phases-1; phase>=0; phase--) // int phase=plan->num_phases-1; { struct fft_plan_phase& p = plan->phase[phase]; dim3 threads(p.num_plan); // template <int direction, int dest_count> // __global__ void Fourier_2D_x(const int nx, const int ny, // int* d_src, int* d_dest, float* d_W_angles, // CUCOMPLEX* dest, CUCOMPLEX* src) #define FFF(dc) case dc: \ Fourier_2D_x<direction,dc><<<blocks, threads>>>(nx, ny, \ p.d_src, p.d_dest, p.d_W_angles, d_dest, d_src); \ break switch(p.dest_count) { FFF( 2); FFF( 3); FFF( 4); FFF( 5); FFF( 6); FFF( 7); FFF( 8); FFF( 9); FFF(10); FFF(11); FFF(12); FFF(13); FFF(14); FFF(15); FFF(16); FFF(17); default: fprintf(stderr, "Spurious 
dest_count[%i] (%s:%i)\n", p.dest_count, __FILE__, __LINE__); } KCHECK; #undef FFF T = d_src; d_src = d_dest; d_dest = T; } #ifdef TESTING printf("Goal:\n"); printf(" 16.0000 + 0.0000i\n"); printf(" 1.4142 + 4.8284i\n"); printf(" -2.0000 + 0.0000i\n"); printf(" -1.4142 + 0.8284i\n"); printf(" -4.0000 + 0.0000i\n"); printf(" -1.4142 - 0.8284i\n"); printf(" -2.0000 - 0.0000i\n"); printf(" 1.4142 - 4.8284i\n"); // printf(" 9.0000 + 0.0000i\n"); // printf(" 1.5000 + 0.8660i\n"); // printf(" -1.5000 + 0.8660i\n"); // printf(" -3.0000 + 0.0000i\n"); // printf(" -1.5000 - 0.8660i\n"); // printf(" 1.5000 - 0.8660i\n"); for(int q=1; q>=0; q--) { if(q == 0) { CHECKCALL(cudaMemcpy(h_data, d_dest, sizeof(CUCOMPLEX)*nx*ny, cudaMemcpyDeviceToHost)); } else { CHECKCALL(cudaMemcpy(h_data, d_src, sizeof(CUCOMPLEX)*nx*ny, cudaMemcpyDeviceToHost)); } printf("\nres (%i):\n", q); for(int i=0; i<nx; i++) { printf("%i) % -.4f %s % -.4fi\n", i, h_data[nx+i].x, h_data[nx+i].y<0?"-":"+", fabs(h_data[nx+i].y)); } } exit(-1); #endif } // template<int direction> // static void fourier2D_x( // fft_plan* plan, int nx, const int ny, // CUCOMPLEX* d_dest, CUCOMPLEX* d_src) // { // // // #ifdef TESTING // // CUCOMPLEX* h_data; // CHECKCALL(malloc_host(&h_data, sizeof(CUCOMPLEX) * nx)); // // h_data[0] = MAKECOMPLEX(1,0); // h_data[1] = MAKECOMPLEX(2,0); // h_data[2] = MAKECOMPLEX(1,0); // h_data[3] = MAKECOMPLEX(1,0); // h_data[4] = MAKECOMPLEX(1,0); // h_data[5] = MAKECOMPLEX(3,0); // //h_data[6] = MAKECOMPLEX(3,0); // //h_data[7] = MAKECOMPLEX(4,0); // //h_data[8] = MAKECOMPLEX(5,0); // // CHECKCALL(cudaMemcpy(d_src, h_data, sizeof(CUCOMPLEX)*nx, cudaMemcpyHostToDevice)); // // printf("Passes: %i\n", passes); // #endif // // fourier2D_x_element(plan, nx, ny, d_dest, d_src); // fft now in d_src // // // // Fourier_2D_x<direction><<<blocks, threads>>>(nx, ny, plan->d_p, plan->d_radix, nradix, d_dest, d_src, passes); // /* // if(!(nradix & 0x1)) //then even number of operations, need to copy sol'n 
to d_dest // { // CHECKCALL(cudaMemcpy(d_dest, d_src, sizeof(CUCOMPLEX)*nx*ny, cudaMemcpyDeviceToDevice)); // }*/ // // printf("fourier2D (%s;%i)\n", __FILE__, __LINE__); // // #ifdef TESTING // // printf("Goal:\n"); // printf(" 9.0000 + 0.0000i\n"); // printf(" 1.5000 + 0.8660i\n"); // printf(" -1.5000 + 0.8660i\n"); // printf(" -3.0000 + 0.0000i\n"); // printf(" -1.5000 - 0.8660i\n"); // printf(" 1.5000 - 0.8660i\n"); // // for(int q=0; q<2; q++) // { // if(q == 0) // { // CHECKCALL(cudaMemcpy(h_data, d_dest, sizeof(CUCOMPLEX)*nx, cudaMemcpyDeviceToHost)); // } // else // { // CHECKCALL(cudaMemcpy(h_data, d_src, sizeof(CUCOMPLEX)*nx, cudaMemcpyDeviceToHost)); // } // // printf("\nres (%i):\n", q); // for(int i=0; i<nx; i++) // { // printf("%i) %g%+gi\n", i, h_data[i].x, h_data[i].y); // } // } // exit(-1); // #endif // } __global__ void transposeSimple(int nx, int ny, CUCOMPLEX *d_dest, CUCOMPLEX *d_src) { IDX_PATT(x, y); if(x >= nx || y >= ny) return; d_dest[x*ny + y] = d_src[y*nx + x]; } template<int direction> static void fourier2D_Transposed( fft_plan* planx, fft_plan* plany, const int nx, const int ny, CUCOMPLEX* d_dest, CUCOMPLEX* d_src) { const int npx = planx->num_phases; const int npy = plany->num_phases; fourier2D_x_element<direction>(planx, nx, ny, d_dest, d_src); //if npx is odd then results are in d_dest #ifdef SMART_SCHEDULE //different thread schedules for different access patterns dim3 blocks(nx); dim3 threads(ny); #else const int _blocksx = nx / 32 + 1; const int _blocksy = ny / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); #endif if(npx % 2) //then odd, fft res in d_dest { transposeSimple<<<blocks, threads>>>(nx, ny, d_src, d_dest); fourier2D_x_element<direction>(plany, ny, nx, d_dest, d_src); } else { transposeSimple<<<blocks, threads>>>(nx, ny, d_dest, d_src); fourier2D_x_element<direction>(plany, ny, nx, d_src, d_dest); } if((npx + npy) % 2) { CHECKCALL(cudaMemcpy(d_dest, d_src, sizeof(CUCOMPLEX)*nx*ny, 
cudaMemcpyDeviceToDevice)); } } int JM_LONGRANGE_PLAN_ws_size(int nx, int ny, int /*nz*/) { return sizeof(CUCOMPLEX) * nx*ny; } JM_LONGRANGE_PLAN* make_JM_LONGRANGE_PLAN(int N_x, int N_y, int N_z, double* GammaXX, double* GammaXY, double* GammaXZ, double* GammaYY, double* GammaYZ, double* GammaZZ, void* ws_d_A, void* ws_d_B) { const int nz = N_z; const int nxy = N_x * N_y; const int sRxy = sizeof(REAL) * nxy; const int sCxy = sizeof(CUCOMPLEX) * nxy; JM_LONGRANGE_PLAN* p = new JM_LONGRANGE_PLAN; p->N_x = N_x; p->N_y = N_y; p->N_z = N_z; p->plan_x = make_plan(N_x); p->plan_y = make_plan(N_y); // temporary workspaces (host) CHECKCALL(malloc_host(&(p->h_temp), sCxy)); CUCOMPLEX* d_A = (CUCOMPLEX*)ws_d_A; CUCOMPLEX* d_B = (CUCOMPLEX*)ws_d_B; // 2D arrays, 1st dimmension is for layer p->d_sx_q = new CUCOMPLEX*[nz]; p->d_sy_q = new CUCOMPLEX*[nz]; p->d_sz_q = new CUCOMPLEX*[nz]; for(int i=0; i<nz; i++) { CHECKCALL(malloc_device(&(p->d_sx_q[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_sy_q[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_sz_q[i]), sCxy)); } CHECKCALL(malloc_device(&(p->d_hA_q), sCxy)); // make room for FT'd interaction matrices p->d_GammaXX = new CUCOMPLEX*[nz]; p->d_GammaXY = new CUCOMPLEX*[nz]; p->d_GammaXZ = new CUCOMPLEX*[nz]; p->d_GammaYY = new CUCOMPLEX*[nz]; p->d_GammaYZ = new CUCOMPLEX*[nz]; p->d_GammaZZ = new CUCOMPLEX*[nz]; for(int i=0; i<nz; i++) { CHECKCALL(malloc_device(&(p->d_GammaXX[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaXY[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaXZ[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaYY[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaYZ[i]), sCxy)); CHECKCALL(malloc_device(&(p->d_GammaZZ[i]), sCxy)); } CHECKCALL(malloc_device(&(p->d_output),sRxy)); // now we will work on loading all the interaction matrices // onto the GPU and fourier transforming them struct { double* h; //host memory CUCOMPLEX** d; //device memory } sd[] = { //sd = static data {GammaXX, p->d_GammaXX}, {GammaXY, p->d_GammaXY}, 
{GammaXZ, p->d_GammaXZ}, {GammaYY, p->d_GammaYY}, {GammaYZ, p->d_GammaYZ}, {GammaZZ, p->d_GammaZZ}, {0,0} }; for(int k=0; k<6; k++) //XX XY XZ YY YZ ZZ { for(int j=0; j<nz; j++) { for(int c=0; c<nxy; c++) { p->h_temp[c] = MAKECOMPLEX(sd[k].h[j*nxy + c], 0); } CHECKCALL(cudaMemcpy(d_A, p->h_temp, sizeof(CUCOMPLEX)*nxy, cudaMemcpyHostToDevice)); fourier2D_Transposed<1>(p->plan_x, p->plan_y, N_x, N_y, d_B, d_A); KCHECK; // going to prescale the data into d_GammaAB: d_scaleC32(N_x, N_y, sd[k].d[j], d_B, 1.0/((float)(nxy))); // d_scaleC(N_x, N_y, sd[k].d[j], p->d_B, 1.0); } } return p; } void free_JM_LONGRANGE_PLAN(JM_LONGRANGE_PLAN* p) { const int N_z = p->N_z; const int nz = N_z; // * 2 - 1; CHECKCALL(cudaFree(p->d_output)); CHECKCALL(cudaFreeHost(p->h_temp)); for(int z=0; z<N_z; z++) { CHECKCALL(cudaFree(p->d_sx_q[z])); CHECKCALL(cudaFree(p->d_sy_q[z])); CHECKCALL(cudaFree(p->d_sz_q[z])); } CHECKCALL(cudaFree(p->d_hA_q)); delete [] p->d_sx_q; delete [] p->d_sy_q; delete [] p->d_sz_q; for(int z=0; z<nz; z++) { CHECKCALL(cudaFree(p->d_GammaXX[z])); CHECKCALL(cudaFree(p->d_GammaXY[z])); CHECKCALL(cudaFree(p->d_GammaXZ[z])); CHECKCALL(cudaFree(p->d_GammaYY[z])); CHECKCALL(cudaFree(p->d_GammaYZ[z])); CHECKCALL(cudaFree(p->d_GammaZZ[z])); } delete [] p->d_GammaXX; delete [] p->d_GammaXY; delete [] p->d_GammaXZ; delete [] p->d_GammaYY; delete [] p->d_GammaYZ; delete [] p->d_GammaZZ; free_plan(p->plan_x); free_plan(p->plan_y); delete p; } __global__ void convolveSum(const int N_x, const int N_y, CUCOMPLEX* d_dest, CUCOMPLEX* d_A, CUCOMPLEX* d_B, float sign) { IDX_PATT(i, y); if(i >= N_x || y >= N_y) return; const int idx = i + y * N_x; #ifdef BOUND_CHECKS if(idx >= N_x * N_y) return; #endif d_dest[idx] = cuCaddf(d_dest[idx], cuCmulf(d_A[idx], cuCmulf(MAKECOMPLEX(sign,0), d_B[idx]))); } __global__ void getLayer32(const int nx, const int ny, const int layer, REAL* d_dest, const REAL* d_src) { IDX_PATT(row, col); if(row >= nx || col >= ny) return; const int _a = col + row*nx; 
const int _b = _a + layer*nx*ny; d_dest[_a] = d_src[_b]; } __global__ void setLayer32(const int N_x, const int N_y, const int layer, REAL* d_dest, REAL* d_src) { IDX_PATT(i, y); if(i >= N_x || y >= N_y) return; const int _b = i + y * N_x; const int _a = _b + layer * N_x * N_y; #ifdef BOUND_CHECKS if(_b >= N_x * N_y) return; #endif d_dest[_a] = d_src[_b]; } void JM_LONGRANGE(JM_LONGRANGE_PLAN* p, const float* d_sx, const float* d_sy, const float* d_sz, float* d_hx, float* d_hy, float* d_hz, void* ws_d_A, void* ws_d_B) { const int N_x = p->N_x; const int N_y = p->N_y; const int N_z = p->N_z; #ifdef SMART_SCHEDULE //different thread schedules for different access patterns dim3 blocks(N_x); dim3 threads(N_y); #else const int _blocksx = N_x / 32 + 1; const int _blocksy = N_y / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); #endif CUCOMPLEX* d_A = (CUCOMPLEX*)ws_d_A; CUCOMPLEX* d_B = (CUCOMPLEX*)ws_d_B; CUCOMPLEX* d_src = d_A; //local vars for swapping workspace CUCOMPLEX* d_dest = d_B; // FT the spins struct { const float* d_s_r; CUCOMPLEX** d_s_q; float* d_h_r; } sd[] = { //sd = static data {d_sx, p->d_sx_q, d_hx}, {d_sy, p->d_sy_q, d_hy}, {d_sz, p->d_sz_q, d_hz} }; for(int k=0; k<3; k++) // x y z { const float* d_s_r = sd[k].d_s_r; for(int z=0; z<N_z; z++) { d_src = d_A; d_dest = d_B; //destination CUCOMPLEX* d_s_q = sd[k].d_s_q[z]; d_r2c(N_x, N_y, d_dest, &(d_s_r[z*N_x*N_y])); fourier2D_Transposed<1>(p->plan_x, p->plan_y, N_x, N_y, d_s_q, d_dest); // fourier2D(N_x, N_y, p->Rx, p->Ry, // p->d_exp2pi_x_f, p->d_exp2pi_y_f, // p->d_base_x, p->d_base_y, // p->d_step_x, p->d_step_y, // d_s_q, d_dest); } } // OK! Now we have all the spins FT'd and the interaction matrix ready. // We will now convolve the signals into hq // Nov 9/2011. 
Negative offsets are the same as positive offsets except tensors with odd number // of Zs are negated (XZ, YZ, not ZZ) for(int targetLayer=0; targetLayer<N_z; targetLayer++) { for(int c=0; c<3; c++) //c = 0,1,2: X,Y,Z { setC32<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, 0, 0); KCHECK; for(int sourceLayer=0; sourceLayer<N_z; sourceLayer++) { //const int offset = (sourceLayer - targetLayer + N_z - 1); int offset = sourceLayer - targetLayer; float sign = 1; if(offset < 0) { offset = -offset; sign = -1; } switch(c) { case 0: convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sx_q[sourceLayer], p->d_GammaXX[offset], 1); convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sy_q[sourceLayer], p->d_GammaXY[offset], 1); convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sz_q[sourceLayer], p->d_GammaXZ[offset], sign); break; case 1: convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sx_q[sourceLayer], p->d_GammaXY[offset], 1); convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sy_q[sourceLayer], p->d_GammaYY[offset], 1); convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sz_q[sourceLayer], p->d_GammaYZ[offset], sign); break; case 2: convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sx_q[sourceLayer], p->d_GammaXZ[offset], sign); convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sy_q[sourceLayer], p->d_GammaYZ[offset], sign); convolveSum<<<blocks, threads>>>(N_x, N_y, p->d_hA_q, p->d_sz_q[sourceLayer], p->d_GammaZZ[offset], 1); } KCHECK } // h(q) now calculated, iFT it float* d_hxyz = sd[c].d_h_r; // this is where the result will go d_src = d_A; d_dest = d_B; fourier2D_Transposed<-1>(p->plan_y, p->plan_x, N_y, N_x, d_src, p->d_hA_q); // fourier2D(N_x, N_y, p->Rx, p->Ry, // p->d_exp2pi_x_b, p->d_exp2pi_y_b, // p->d_base_x, p->d_base_y, // p->d_step_x, p->d_step_y, // d_src, p->d_hA_q); // //real space of iFFT in d_src, need to chop off the (hopefully) zero imag part getRPart32<<<blocks, threads>>>(N_x, N_y, 
p->d_output, d_src); KCHECK; setLayer32<<<blocks, threads>>>(N_x, N_y, targetLayer, d_hxyz, p->d_output); KCHECK; } } //holy crap, we're done. }
36d36f33f93821cce771628d7756732c41a1e9d4.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/InitialTensorOptions.h> #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseCsrTensorUtils.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/Resize.h> #include <ATen/native/SparseTensorUtils.h> #include <algorithm> #include <ATen/AccumulateType.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_convert_indices_from_coo_to_csr_native.h> #include <ATen/ops/_convert_indices_from_csr_to_coo_native.h> #include <ATen/ops/_sparse_csr_tensor_unsafe_native.h> #include <ATen/ops/_unique.h> #include <ATen/ops/add_native.h> #include <ATen/ops/resize_as_sparse_native.h> #include <ATen/ops/tensor.h> #include <ATen/ops/zeros.h> #endif #include <hip/hip_runtime.h> #include <type_traits> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPUtils.h> #include <ATen/hip/ThrustAllocator.h> #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h> #include <ATen/native/hip/Reduce.cuh> #include <ATen/native/sparse/hip/SparseBlasImpl.h> #include <ATen/native/sparse/hip/SparseHIPBlas.h> #include <ATen/native/sparse/hip/SparseHIPTensorMath.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/sequence.h> namespace at { namespace native { namespace { template <typename input_t, typename output_t> __global__ void convert_indices_from_coo_to_csr_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t size, const int64_t numel) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { for (int64_t i = 0; i <= data_in[0]; i++) data_out[i] = static_cast<output_t>(0); } else if (tid < numel) { for (int64_t i = data_in[tid - 1]; i < data_in[tid]; i++) data_out[i + 1] = 
static_cast<output_t>(tid); } else if (tid == numel) { for (int64_t i = data_in[numel - 1] + 1; i < size + 1; i++) data_out[i] = static_cast<output_t>(numel); } } template <typename input_t, typename output_t> void convert_indices_from_coo_to_csr_cuda(const Tensor& result, const Tensor& input, const int64_t size) { int64_t numel = input.numel(); const input_t* data_in = input.data_ptr<input_t>(); output_t* data_out = result.data_ptr<output_t>(); if (numel == 0) { result.zero_(); return; } // Run (numel + 1) threads... int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (numel + THREADS) / THREADS; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( convert_indices_from_coo_to_csr_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, data_out, data_in, size, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename input_t, typename output_t> __global__ void convert_indices_from_csr_to_coo_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t nrows) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < nrows) { for (int64_t i = data_in[tid]; i < data_in[tid + 1]; i++) data_out[i] = static_cast<output_t>(tid); } } template <typename input_t, typename output_t> void convert_indices_from_csr_to_coo_cuda(const Tensor& indices, const Tensor& crow_indices, const Tensor& col_indices, const bool transpose=false) { int64_t nrows = crow_indices.numel() - 1; if (nrows == 0) { indices.zero_(); return; } auto crow_indices_ = crow_indices.expect_contiguous(); const input_t* crow_indices_data_in = crow_indices_->data_ptr<input_t>(); TORCH_INTERNAL_ASSERT(indices.is_contiguous()); auto row0 = indices.select(0, transpose?1:0); auto row1 = indices.select(0, transpose?0:1); output_t* data_out = row0.data_ptr<output_t>(); // Run nrows threads... 
int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (nrows + THREADS) / THREADS; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); row1.copy_(*col_indices.expect_contiguous()); hipLaunchKernelGGL(( convert_indices_from_csr_to_coo_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, data_out, crow_indices_data_in, nrows); C10_HIP_KERNEL_LAUNCH_CHECK(); } } // namespace using namespace at::sparse_csr; // certain utiliy functions are usable from sparse COO. using namespace at::sparse; Tensor& add_out_dense_sparse_csr_cuda( Tensor& output, const Tensor& dense, const SparseCsrTensor& src, const Scalar& alpha) { TORCH_INTERNAL_ASSERT(dense.layout() == kStrided); TORCH_INTERNAL_ASSERT(src.is_sparse_csr()); TORCH_INTERNAL_ASSERT(dense.is_cuda()); TORCH_CHECK( output.is_contiguous(), "out argument must be contiguous, but got: ", output.suggest_memory_format()); TORCH_CHECK( output.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", output.device()); TORCH_CHECK( src.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got tensor on device: ", src.device()); TORCH_CHECK( dense.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", src.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)"); auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type()); TORCH_CHECK( canCast(commonDtype, output.scalar_type()), "Can't convert result type ", commonDtype, " to output ", output.scalar_type(), " in add operation"); Tensor src_values = src.values(); resize_output(output, dense.sizes()); Tensor resultBuffer = output; if (output.scalar_type() != commonDtype) { resultBuffer = dense.to(commonDtype); } else if (!is_same_tensor(output, dense)) { resultBuffer.copy_(dense); } if (src._nnz() == 0) { return output; } auto valuesBuffer = 
src_values.to(commonDtype).reshape({-1, src_values.size(-1)}).contiguous(); resultBuffer = resultBuffer.view({-1, output.size(-2), output.size(-1)}); auto src_crow_indices = src.crow_indices().reshape({-1, src.crow_indices().size(-1)}).contiguous(); auto src_col_indices = src.col_indices().reshape({-1, src.col_indices().size(-1)}).contiguous(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( kComplexHalf, kHalf, kBool, kBFloat16, commonDtype, "add_out_op2_sparse_csr", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { AT_DISPATCH_INDEX_TYPES( src_crow_indices.scalar_type(), "csr_add_out_crow_indices", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { auto batch_count = resultBuffer.dim() > 2 ? resultBuffer.size(-3) : 1; scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>(); scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>(); scalar_t cast_value = alpha.to<scalar_t>(); index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>(); index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>(); int64_t out_storage_offset = resultBuffer.storage_offset(); auto out_strides = resultBuffer.strides(); auto out_strides0 = out_strides[0]; auto out_strides1 = out_strides[1]; auto crow_stride0 = src_crow_indices.stride(0); auto col_stride0 = src_col_indices.stride(0); auto val_stride0 = valuesBuffer.stride(0); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); at::cuda::ThrustAllocator allocator; auto policy = thrust::hip::par(allocator).on(stream); // Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows. 
thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(src_crow_indices.size(-1) - 1)), [values_accessor, crow_indices_accessor, col_indices_accessor, out_ptr, cast_value, out_strides0, out_strides1, crow_stride0, col_stride0, val_stride0, batch_count ]__device__(int64_t irow) { for (index_t batch_idx = 0; batch_idx < batch_count; batch_idx++) { index_t start_index = crow_indices_accessor[batch_idx*crow_stride0 + irow]; index_t end_index = crow_indices_accessor[batch_idx*crow_stride0 + irow + 1]; for (index_t i = start_index; i < end_index; ++i) { auto icol = col_indices_accessor[batch_idx*col_stride0 + i]; auto index = batch_idx * out_strides0 + irow * out_strides1 + icol; out_ptr[index] += cast_value * values_accessor[batch_idx*val_stride0 + i]; } } }); }); }); if (output.scalar_type() != commonDtype) { output.copy_(resultBuffer); } return output; } Tensor& add_out_sparse_csr_cuda( const Tensor& self, const SparseCsrTensor& other, const Scalar& alpha, SparseCsrTensor& out) { if (self.layout() == kStrided) { add_out_dense_sparse_csr_cuda(out, self, other, alpha); } else { TORCH_CHECK( self.sizes().equals(other.sizes()), "torch.add: Expected input tensors to have the same shape, but got tensor `self` with shape ", self.sizes(), " and tensor `other` with shape ", other.sizes()); TORCH_CHECK( self.is_cuda(), "add: expected 'self' to be CUDA tensor, but got tensor on device: ", self.device()); TORCH_CHECK( other.is_cuda(), "add: expected 'other' to be CUDA tensor, but got tensor on device: ", other.device()); TORCH_CHECK( out.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", out.device()); if (only_sparse_compressed_add_trivial_cases(self, other, alpha, out)) { return out; } at::native::resize_as_sparse_compressed_(out, self); sparse::impl::cuda::add_out_sparse_csr(self, other, Scalar(1), alpha, out); } return out; } TORCH_IMPL_FUNC(_convert_indices_from_coo_to_csr_structured_cuda) ( 
const Tensor& input, const int64_t size, const bool out_int32, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int>(result, input, size); }); } else { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int64_t>(result, input, size); }); } } TORCH_IMPL_FUNC(_convert_indices_from_csr_to_coo_structured_cuda) ( const Tensor& crow_indices, const Tensor& col_indices, const bool out_int32, const bool transpose, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(crow_indices.scalar_type(), "convert_indices_from_csr_to_coo_cuda", [&] { convert_indices_from_csr_to_coo_cuda<scalar_t, int32_t>(result, crow_indices, col_indices, transpose); }); } else { AT_DISPATCH_INTEGRAL_TYPES(crow_indices.scalar_type(), "convert_indices_from_csr_to_coo_cuda", [&] { convert_indices_from_csr_to_coo_cuda<scalar_t, int64_t>(result, crow_indices, col_indices, transpose); }); } } /* Reductions on sparse CSR tensors using masked semantics. - To support a reduction operator on a CSR tensor with CUDA storage, define template <typename scalar_t> struct Reduction...Op { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a ... b; } __device__ __forceinline__ scalar_t identity() const { return ...; } __forceinline__ scalar_t identity_cpu() const { return ...; } }; Tensor _sparse_csr_..._cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional<ScalarType> dtype) { ... result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_sum, keepdim, Reduction...Op<scalar_t>()); ... return result; } and add the following - func: _sparse_csr_op.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor dispatch: SparseCsrCUDA: _sparse_csr_..._cuda to native_functions.yaml */ namespace { template <typename scalar_t, typename index_t, typename ReductionOp, typename acc_t> __global__ void reduce_sparse_csr_dim0_cuda_kernel(acc_t* new_values, const index_t* new_col_indices, const int64_t new_nnz, const scalar_t* values, const index_t* col_indices, const int64_t nnz, ReductionOp rop ) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < new_nnz) { index_t col = new_col_indices[tid]; acc_t v = rop.identity(); for (int64_t j=0; j < nnz; j++) { if (col == col_indices[j]) { v = rop(v, acc_t(values[j])); } } new_values[tid] = v; } } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim0_cuda_template(const Tensor& sparse, ReductionOp rop) { /* Consider the following sparse tensor: 1 * * * * * * * 2 * * * 3 * * * * * * * 4 * 5 * * that has CSR representation crow_indices = [0, 1, 2, 3, 3, 5] col_indices = [0, 3, 2, 0, 2] values = [1, 2, 3, 4, 5] Reduction with dim=0 results: rop(1,4) * rop(3,5) 2 * that has CSR representation new_crow_indices = [0, 3] new_col_indices = [0, 2, 3] new_values = [rop(1, 4], rop(3, 5), 2] In general, the CSR representation data can be computed as follows: nnz = col_indices.numel() new_col_indices = col_indices.unique(sorted=True, return_inverse=False) new_nnz = new_col_indices.numel() new_crow_indices = [0, new_nnz] new_values.resize(new_nnz) for i in range(new_nnz): v = identity col = new_col_indices[i] for j in range(nnz): if col == col_indices[j]: v = rop(v, values[j]) new_values[i] = v Notice this algorithm is different from the one used on CPU data. 
*/ Tensor col_indices = sparse.col_indices(); Tensor values = sparse.values(); auto ncols = sparse.size(1); auto nnz = col_indices.numel(); Tensor new_col_indices; std::tie(new_col_indices, std::ignore) = at::_unique(col_indices, true, false); auto new_nnz = new_col_indices.numel(); Tensor new_crow_indices = at::tensor(ArrayRef<int64_t>{0, new_nnz}, col_indices.options()); // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type // of float should be float in current scenario. In CUDA, float is the accumulate type // of float, while in CPU, double is the accumulate type of float. using acc_t = at::acc_type<scalar_t, true>; auto acc_buffer = at::sparse_csr::create_acc_buffer<acc_t, scalar_t>( values.options(), values.scalar_type(), new_nnz); Tensor new_values = std::get<0>(acc_buffer); Tensor new_values_acc = std::get<1>(acc_buffer); scalar_t* values_ptr = values.data_ptr<scalar_t>(); acc_t* new_values_acc_ptr = new_values_acc.data_ptr<acc_t>(); int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (new_nnz + THREADS) / THREADS; at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_INDEX_TYPES(col_indices.scalar_type(), "reduce_sparse_csr_dim0_cuda_indices", [&]() { index_t* col_indices_ptr = col_indices.data_ptr<index_t>(); index_t* new_col_indices_ptr = new_col_indices.data_ptr<index_t>(); hipLaunchKernelGGL(( reduce_sparse_csr_dim0_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, new_values_acc_ptr, new_col_indices_ptr, new_nnz, values_ptr, col_indices_ptr, nnz, rop ); }); copy_from_acc_buffer(new_values, new_values_acc); C10_HIP_KERNEL_LAUNCH_CHECK(); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {1, ncols}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename index_t> __global__ void reduce_crow_indices_dim1_cuda_kernel(index_t* new_crow_indices, index_t* 
row_map, const index_t* crow_indices, const int64_t nrows ) { int64_t nnz = 0; new_crow_indices[0] = 0; for(int64_t i=0; i<nrows; i++) { if (crow_indices[i] != crow_indices[i + 1]) { row_map[i] = nnz; nnz++; } new_crow_indices[i + 1] = nnz; } } template <typename scalar_t, typename index_t, typename ReductionOp, typename acc_t> __global__ void reduce_sparse_csr_dim1_cuda_kernel(acc_t* new_values, const scalar_t* values, const index_t* crow_indices, const index_t* row_map, const int64_t nrows, ReductionOp rop ) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < nrows) { index_t i_start = crow_indices[tid]; index_t i_end = crow_indices[tid+1]; if (i_start != i_end) { acc_t acc = rop.identity(); for (index_t i = i_start; i < i_end; i++) { acc = rop(acc, acc_t(values[i])); } new_values[row_map[tid]] = acc; } } } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim1_cuda_template(const Tensor& sparse, ReductionOp rop) { /* The algorithm of computing reduce of a CSR tensor along the last dimension is explained in the comment of the reduce_sparse_csr_dim1_cpu_template function. */ Tensor crow_indices = sparse.crow_indices(); auto ioptions = crow_indices.options(); Tensor values = sparse.values(); auto nrows = sparse.size(0); auto numel = values.numel(); Tensor new_crow_indices = at::empty({crow_indices.numel()}, ioptions); Tensor new_col_indices = at::empty({}, ioptions); Tensor row_map = at::empty({nrows}, ioptions); // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type // of float should be float in current scenario. In CUDA, float is the accumulate type // of float, while in CPU, double is the accumulate type of float. 
using acc_t = at::acc_type<scalar_t, true>; auto acc_buffer = at::sparse_csr::create_acc_buffer<acc_t, scalar_t>( values.options(), values.scalar_type()); Tensor new_values = std::get<0>(acc_buffer); Tensor new_values_acc = std::get<1>(acc_buffer); at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (nrows + THREADS) / THREADS; AT_DISPATCH_INDEX_TYPES(crow_indices.scalar_type(), "reduce_sparse_csr_dim1_cuda_indices", [&]() { index_t* crow_indices_ptr = crow_indices.data_ptr<index_t>(); index_t* new_crow_indices_ptr = new_crow_indices.data_ptr<index_t>(); index_t* row_map_ptr = row_map.data_ptr<index_t>(); hipLaunchKernelGGL(( reduce_crow_indices_dim1_cuda_kernel), dim3(1), dim3(1), 0, stream, new_crow_indices_ptr, row_map_ptr, crow_indices_ptr, nrows); C10_HIP_KERNEL_LAUNCH_CHECK(); index_t new_nnz = new_crow_indices[-1].item<index_t>(); new_col_indices.resize_(new_nnz); new_col_indices.fill_(index_t(0)); new_values.resize_(new_nnz); new_values_acc.resize_(new_nnz); scalar_t* values_ptr = values.data_ptr<scalar_t>(); acc_t* new_values_acc_ptr = new_values_acc.data_ptr<acc_t>(); hipLaunchKernelGGL(( reduce_sparse_csr_dim1_cuda_kernel), dim3(BLOCKS), dim3(THREADS), 0, stream, new_values_acc_ptr, values_ptr, crow_indices_ptr, row_map_ptr, nrows, rop); C10_HIP_KERNEL_LAUNCH_CHECK(); }); copy_from_acc_buffer(new_values, new_values_acc); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {sparse.size(0), 1}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim01_cuda_template(const Tensor& sparse, ReductionOp rop) { auto ioptions = sparse.col_indices().options(); Tensor values = sparse.values(); auto numel = values.numel(); auto nnz = std::min<int64_t>(1, numel); auto result_dtype = 
at::isIntegralType(values.scalar_type(), /*includeBool=*/true) ? ScalarType::Long : values.scalar_type(); Tensor new_values, new_values_acc; if (numel > 0) { new_values = at::empty({1}, values.options().dtype(result_dtype)); new_values_acc = at::empty({1}, values.options()); auto iter = TensorIterator::reduce_op(new_values_acc, values); gpu_reduce_kernel<scalar_t, scalar_t>(iter, func_wrapper<scalar_t>(rop), rop.identity_cpu()); new_values.copy_(new_values_acc); } else { new_values = at::empty({}, values.options().dtype(result_dtype)); } Tensor new_col_indices = at::zeros({nnz}, ioptions); Tensor new_crow_indices = at::tensor(ArrayRef<int64_t>{0, nnz}, ioptions); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {1, std::min<int64_t>(1, sparse.size(1))}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_cuda_template(const Tensor& sparse, std::vector<int64_t> dims, ReductionOp rop) { if (dims.size() == 1) { if (dims[0] == 0) { return reduce_sparse_csr_dim0_cuda_template<scalar_t>(sparse, rop); } else { TORCH_INTERNAL_ASSERT(dims[0] == 1); return reduce_sparse_csr_dim1_cuda_template<scalar_t>(sparse, rop); } } else if (dims.size() == 2) { TORCH_INTERNAL_ASSERT(((dims[0] == 0 && dims[1] == 1) || (dims[0] == 1 && dims[1] == 0))); return reduce_sparse_csr_dim01_cuda_template<scalar_t>(sparse, rop); } TORCH_INTERNAL_ASSERT(dims.size() == 0); // effective after gh-29137 has been resolved return sparse.clone(); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_cuda_template(const Tensor& sparse, IntArrayRef dims_to_sum, bool keepdim, ReductionOp rop) { TORCH_INTERNAL_ASSERT(sparse.is_sparse_csr()); TORCH_CHECK(keepdim, "reduction operations on CSR tensors with keepdim=False is unsupported"); TORCH_INTERNAL_ASSERT(sparse.is_cuda()); const int64_t input_dim = sparse.dim(); TORCH_INTERNAL_ASSERT(input_dim == 2); 
auto dims = dims_to_sum.vec(); maybe_wrap_dims(dims, input_dim); if (dims.size() == 0) { // after gh-29137 is resolved, delete this if-block dims.emplace_back(0); dims.emplace_back(1); } return reduce_sparse_csr_cuda_template<scalar_t>(sparse, dims, rop); } template <typename scalar_t> struct ReductionAddOp { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a + b; } __device__ __forceinline__ scalar_t identity() const { return 0; } __forceinline__ scalar_t identity_cpu() const { return 0; } }; template <typename scalar_t> struct ReductionMulOp { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a * b; } __device__ __forceinline__ scalar_t identity() const { return 1; } __forceinline__ scalar_t identity_cpu() const { return 1; } }; } // namespace Tensor _sparse_csr_sum_cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional<ScalarType> dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = at::sparse_csr::to_type(input, dtype_); Tensor result; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( kHalf, kBFloat16, input_.scalar_type(), "_sparse_csr_sum_cuda", [&] { // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type // of float should be float in current scenario. In CUDA, float is the accumulate type // of float, while in CPU, double is the accumulate type of float. 
using acc_t = at::acc_type<scalar_t, true>; result = reduce_sparse_csr_cuda_template<scalar_t>( input_, dims_to_sum, keepdim, ReductionAddOp<acc_t>()); }); return result; } Tensor _sparse_csr_prod_cuda(const Tensor& input, IntArrayRef dims_to_reduce, bool keepdim, c10::optional<ScalarType> dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = input.to(dtype_); Tensor result; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( kHalf, kBFloat16, input_.scalar_type(), "_sparse_csr_prod_cuda", [&] { result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_reduce, keepdim, ReductionMulOp<scalar_t>()); }); return result; } } // namespace native } // namespace at
36d36f33f93821cce771628d7756732c41a1e9d4.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/ExpandUtils.h> #include <ATen/InitialTensorOptions.h> #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseCsrTensorUtils.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/Resize.h> #include <ATen/native/SparseTensorUtils.h> #include <algorithm> #include <ATen/AccumulateType.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_convert_indices_from_coo_to_csr_native.h> #include <ATen/ops/_convert_indices_from_csr_to_coo_native.h> #include <ATen/ops/_sparse_csr_tensor_unsafe_native.h> #include <ATen/ops/_unique.h> #include <ATen/ops/add_native.h> #include <ATen/ops/resize_as_sparse_native.h> #include <ATen/ops/tensor.h> #include <ATen/ops/zeros.h> #endif #include <cuda_runtime.h> #include <type_traits> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAUtils.h> #include <ATen/cuda/ThrustAllocator.h> #include <c10/cuda/CUDACachingAllocator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/sparse/cuda/SparseBlasImpl.h> #include <ATen/native/sparse/cuda/SparseCUDABlas.h> #include <ATen/native/sparse/cuda/SparseCUDATensorMath.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/sequence.h> namespace at { namespace native { namespace { template <typename input_t, typename output_t> __global__ void convert_indices_from_coo_to_csr_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t size, const int64_t numel) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { for (int64_t i = 0; i <= data_in[0]; i++) data_out[i] = static_cast<output_t>(0); } else if (tid < numel) { for (int64_t i = data_in[tid - 1]; i < data_in[tid]; i++) data_out[i + 1] = static_cast<output_t>(tid); } else if (tid == numel) { for (int64_t i = 
data_in[numel - 1] + 1; i < size + 1; i++) data_out[i] = static_cast<output_t>(numel); } } template <typename input_t, typename output_t> void convert_indices_from_coo_to_csr_cuda(const Tensor& result, const Tensor& input, const int64_t size) { int64_t numel = input.numel(); const input_t* data_in = input.data_ptr<input_t>(); output_t* data_out = result.data_ptr<output_t>(); if (numel == 0) { result.zero_(); return; } // Run (numel + 1) threads... int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (numel + THREADS) / THREADS; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); convert_indices_from_coo_to_csr_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(data_out, data_in, size, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename input_t, typename output_t> __global__ void convert_indices_from_csr_to_coo_cuda_kernel(output_t* data_out, const input_t* data_in, const int64_t nrows) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < nrows) { for (int64_t i = data_in[tid]; i < data_in[tid + 1]; i++) data_out[i] = static_cast<output_t>(tid); } } template <typename input_t, typename output_t> void convert_indices_from_csr_to_coo_cuda(const Tensor& indices, const Tensor& crow_indices, const Tensor& col_indices, const bool transpose=false) { int64_t nrows = crow_indices.numel() - 1; if (nrows == 0) { indices.zero_(); return; } auto crow_indices_ = crow_indices.expect_contiguous(); const input_t* crow_indices_data_in = crow_indices_->data_ptr<input_t>(); TORCH_INTERNAL_ASSERT(indices.is_contiguous()); auto row0 = indices.select(0, transpose?1:0); auto row1 = indices.select(0, transpose?0:1); output_t* data_out = row0.data_ptr<output_t>(); // Run nrows threads... 
int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (nrows + THREADS) / THREADS; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); row1.copy_(*col_indices.expect_contiguous()); convert_indices_from_csr_to_coo_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(data_out, crow_indices_data_in, nrows); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } // namespace using namespace at::sparse_csr; // certain utiliy functions are usable from sparse COO. using namespace at::sparse; Tensor& add_out_dense_sparse_csr_cuda( Tensor& output, const Tensor& dense, const SparseCsrTensor& src, const Scalar& alpha) { TORCH_INTERNAL_ASSERT(dense.layout() == kStrided); TORCH_INTERNAL_ASSERT(src.is_sparse_csr()); TORCH_INTERNAL_ASSERT(dense.is_cuda()); TORCH_CHECK( output.is_contiguous(), "out argument must be contiguous, but got: ", output.suggest_memory_format()); TORCH_CHECK( output.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", output.device()); TORCH_CHECK( src.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got tensor on device: ", src.device()); TORCH_CHECK( dense.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", src.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)"); auto commonDtype = promoteTypes(dense.scalar_type(), src.scalar_type()); TORCH_CHECK( canCast(commonDtype, output.scalar_type()), "Can't convert result type ", commonDtype, " to output ", output.scalar_type(), " in add operation"); Tensor src_values = src.values(); resize_output(output, dense.sizes()); Tensor resultBuffer = output; if (output.scalar_type() != commonDtype) { resultBuffer = dense.to(commonDtype); } else if (!is_same_tensor(output, dense)) { resultBuffer.copy_(dense); } if (src._nnz() == 0) { return output; } auto valuesBuffer = src_values.to(commonDtype).reshape({-1, 
src_values.size(-1)}).contiguous(); resultBuffer = resultBuffer.view({-1, output.size(-2), output.size(-1)}); auto src_crow_indices = src.crow_indices().reshape({-1, src.crow_indices().size(-1)}).contiguous(); auto src_col_indices = src.col_indices().reshape({-1, src.col_indices().size(-1)}).contiguous(); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4( kComplexHalf, kHalf, kBool, kBFloat16, commonDtype, "add_out_op2_sparse_csr", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { AT_DISPATCH_INDEX_TYPES( src_crow_indices.scalar_type(), "csr_add_out_crow_indices", [&valuesBuffer, &resultBuffer, &alpha, &src_crow_indices, &src_col_indices]() { auto batch_count = resultBuffer.dim() > 2 ? resultBuffer.size(-3) : 1; scalar_t* values_accessor = valuesBuffer.data_ptr<scalar_t>(); scalar_t* out_ptr = resultBuffer.data_ptr<scalar_t>(); scalar_t cast_value = alpha.to<scalar_t>(); index_t* crow_indices_accessor = src_crow_indices.data_ptr<index_t>(); index_t* col_indices_accessor = src_col_indices.data_ptr<index_t>(); int64_t out_storage_offset = resultBuffer.storage_offset(); auto out_strides = resultBuffer.strides(); auto out_strides0 = out_strides[0]; auto out_strides1 = out_strides[1]; auto crow_stride0 = src_crow_indices.stride(0); auto col_stride0 = src_col_indices.stride(0); auto val_stride0 = valuesBuffer.stride(0); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); at::cuda::ThrustAllocator allocator; auto policy = thrust::cuda::par(allocator).on(stream); // Note that this could be wildly imbalanced if the sparsity pattern varies a lot between rows. 
thrust::for_each( policy, thrust::make_counting_iterator(int64_t(0)), thrust::make_counting_iterator(int64_t(src_crow_indices.size(-1) - 1)), [values_accessor, crow_indices_accessor, col_indices_accessor, out_ptr, cast_value, out_strides0, out_strides1, crow_stride0, col_stride0, val_stride0, batch_count ]__device__(int64_t irow) { for (index_t batch_idx = 0; batch_idx < batch_count; batch_idx++) { index_t start_index = crow_indices_accessor[batch_idx*crow_stride0 + irow]; index_t end_index = crow_indices_accessor[batch_idx*crow_stride0 + irow + 1]; for (index_t i = start_index; i < end_index; ++i) { auto icol = col_indices_accessor[batch_idx*col_stride0 + i]; auto index = batch_idx * out_strides0 + irow * out_strides1 + icol; out_ptr[index] += cast_value * values_accessor[batch_idx*val_stride0 + i]; } } }); }); }); if (output.scalar_type() != commonDtype) { output.copy_(resultBuffer); } return output; } Tensor& add_out_sparse_csr_cuda( const Tensor& self, const SparseCsrTensor& other, const Scalar& alpha, SparseCsrTensor& out) { if (self.layout() == kStrided) { add_out_dense_sparse_csr_cuda(out, self, other, alpha); } else { TORCH_CHECK( self.sizes().equals(other.sizes()), "torch.add: Expected input tensors to have the same shape, but got tensor `self` with shape ", self.sizes(), " and tensor `other` with shape ", other.sizes()); TORCH_CHECK( self.is_cuda(), "add: expected 'self' to be CUDA tensor, but got tensor on device: ", self.device()); TORCH_CHECK( other.is_cuda(), "add: expected 'other' to be CUDA tensor, but got tensor on device: ", other.device()); TORCH_CHECK( out.is_cuda(), "add: expected 'out' to be CUDA tensor, but got tensor on device: ", out.device()); if (only_sparse_compressed_add_trivial_cases(self, other, alpha, out)) { return out; } at::native::resize_as_sparse_compressed_(out, self); sparse::impl::cuda::add_out_sparse_csr(self, other, Scalar(1), alpha, out); } return out; } TORCH_IMPL_FUNC(_convert_indices_from_coo_to_csr_structured_cuda) ( 
const Tensor& input, const int64_t size, const bool out_int32, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int>(result, input, size); }); } else { AT_DISPATCH_INTEGRAL_TYPES(input.scalar_type(), "convert_indices_from_coo_to_csr_cuda", [&] { convert_indices_from_coo_to_csr_cuda<scalar_t, int64_t>(result, input, size); }); } } TORCH_IMPL_FUNC(_convert_indices_from_csr_to_coo_structured_cuda) ( const Tensor& crow_indices, const Tensor& col_indices, const bool out_int32, const bool transpose, const Tensor& result ) { if (out_int32) { AT_DISPATCH_INTEGRAL_TYPES(crow_indices.scalar_type(), "convert_indices_from_csr_to_coo_cuda", [&] { convert_indices_from_csr_to_coo_cuda<scalar_t, int32_t>(result, crow_indices, col_indices, transpose); }); } else { AT_DISPATCH_INTEGRAL_TYPES(crow_indices.scalar_type(), "convert_indices_from_csr_to_coo_cuda", [&] { convert_indices_from_csr_to_coo_cuda<scalar_t, int64_t>(result, crow_indices, col_indices, transpose); }); } } /* Reductions on sparse CSR tensors using masked semantics. - To support a reduction operator on a CSR tensor with CUDA storage, define template <typename scalar_t> struct Reduction...Op { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a ... b; } __device__ __forceinline__ scalar_t identity() const { return ...; } __forceinline__ scalar_t identity_cpu() const { return ...; } }; Tensor _sparse_csr_..._cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional<ScalarType> dtype) { ... result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_sum, keepdim, Reduction...Op<scalar_t>()); ... return result; } and add the following - func: _sparse_csr_op.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor dispatch: SparseCsrCUDA: _sparse_csr_..._cuda to native_functions.yaml */ namespace { template <typename scalar_t, typename index_t, typename ReductionOp, typename acc_t> __global__ void reduce_sparse_csr_dim0_cuda_kernel(acc_t* new_values, const index_t* new_col_indices, const int64_t new_nnz, const scalar_t* values, const index_t* col_indices, const int64_t nnz, ReductionOp rop ) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < new_nnz) { index_t col = new_col_indices[tid]; acc_t v = rop.identity(); for (int64_t j=0; j < nnz; j++) { if (col == col_indices[j]) { v = rop(v, acc_t(values[j])); } } new_values[tid] = v; } } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim0_cuda_template(const Tensor& sparse, ReductionOp rop) { /* Consider the following sparse tensor: 1 * * * * * * * 2 * * * 3 * * * * * * * 4 * 5 * * that has CSR representation crow_indices = [0, 1, 2, 3, 3, 5] col_indices = [0, 3, 2, 0, 2] values = [1, 2, 3, 4, 5] Reduction with dim=0 results: rop(1,4) * rop(3,5) 2 * that has CSR representation new_crow_indices = [0, 3] new_col_indices = [0, 2, 3] new_values = [rop(1, 4], rop(3, 5), 2] In general, the CSR representation data can be computed as follows: nnz = col_indices.numel() new_col_indices = col_indices.unique(sorted=True, return_inverse=False) new_nnz = new_col_indices.numel() new_crow_indices = [0, new_nnz] new_values.resize(new_nnz) for i in range(new_nnz): v = identity col = new_col_indices[i] for j in range(nnz): if col == col_indices[j]: v = rop(v, values[j]) new_values[i] = v Notice this algorithm is different from the one used on CPU data. 
*/ Tensor col_indices = sparse.col_indices(); Tensor values = sparse.values(); auto ncols = sparse.size(1); auto nnz = col_indices.numel(); Tensor new_col_indices; std::tie(new_col_indices, std::ignore) = at::_unique(col_indices, true, false); auto new_nnz = new_col_indices.numel(); Tensor new_crow_indices = at::tensor(ArrayRef<int64_t>{0, new_nnz}, col_indices.options()); // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type // of float should be float in current scenario. In CUDA, float is the accumulate type // of float, while in CPU, double is the accumulate type of float. using acc_t = at::acc_type<scalar_t, true>; auto acc_buffer = at::sparse_csr::create_acc_buffer<acc_t, scalar_t>( values.options(), values.scalar_type(), new_nnz); Tensor new_values = std::get<0>(acc_buffer); Tensor new_values_acc = std::get<1>(acc_buffer); scalar_t* values_ptr = values.data_ptr<scalar_t>(); acc_t* new_values_acc_ptr = new_values_acc.data_ptr<acc_t>(); int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (new_nnz + THREADS) / THREADS; at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_INDEX_TYPES(col_indices.scalar_type(), "reduce_sparse_csr_dim0_cuda_indices", [&]() { index_t* col_indices_ptr = col_indices.data_ptr<index_t>(); index_t* new_col_indices_ptr = new_col_indices.data_ptr<index_t>(); reduce_sparse_csr_dim0_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(new_values_acc_ptr, new_col_indices_ptr, new_nnz, values_ptr, col_indices_ptr, nnz, rop ); }); copy_from_acc_buffer(new_values, new_values_acc); C10_CUDA_KERNEL_LAUNCH_CHECK(); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {1, ncols}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename index_t> __global__ void reduce_crow_indices_dim1_cuda_kernel(index_t* new_crow_indices, index_t* row_map, const index_t* crow_indices, const int64_t nrows ) { 
int64_t nnz = 0; new_crow_indices[0] = 0; for(int64_t i=0; i<nrows; i++) { if (crow_indices[i] != crow_indices[i + 1]) { row_map[i] = nnz; nnz++; } new_crow_indices[i + 1] = nnz; } } template <typename scalar_t, typename index_t, typename ReductionOp, typename acc_t> __global__ void reduce_sparse_csr_dim1_cuda_kernel(acc_t* new_values, const scalar_t* values, const index_t* crow_indices, const index_t* row_map, const int64_t nrows, ReductionOp rop ) { int64_t tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < nrows) { index_t i_start = crow_indices[tid]; index_t i_end = crow_indices[tid+1]; if (i_start != i_end) { acc_t acc = rop.identity(); for (index_t i = i_start; i < i_end; i++) { acc = rop(acc, acc_t(values[i])); } new_values[row_map[tid]] = acc; } } } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim1_cuda_template(const Tensor& sparse, ReductionOp rop) { /* The algorithm of computing reduce of a CSR tensor along the last dimension is explained in the comment of the reduce_sparse_csr_dim1_cpu_template function. */ Tensor crow_indices = sparse.crow_indices(); auto ioptions = crow_indices.options(); Tensor values = sparse.values(); auto nrows = sparse.size(0); auto numel = values.numel(); Tensor new_crow_indices = at::empty({crow_indices.numel()}, ioptions); Tensor new_col_indices = at::empty({}, ioptions); Tensor row_map = at::empty({nrows}, ioptions); // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type // of float should be float in current scenario. In CUDA, float is the accumulate type // of float, while in CPU, double is the accumulate type of float. 
using acc_t = at::acc_type<scalar_t, true>; auto acc_buffer = at::sparse_csr::create_acc_buffer<acc_t, scalar_t>( values.options(), values.scalar_type()); Tensor new_values = std::get<0>(acc_buffer); Tensor new_values_acc = std::get<1>(acc_buffer); at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); int64_t THREADS = at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; int64_t BLOCKS = (nrows + THREADS) / THREADS; AT_DISPATCH_INDEX_TYPES(crow_indices.scalar_type(), "reduce_sparse_csr_dim1_cuda_indices", [&]() { index_t* crow_indices_ptr = crow_indices.data_ptr<index_t>(); index_t* new_crow_indices_ptr = new_crow_indices.data_ptr<index_t>(); index_t* row_map_ptr = row_map.data_ptr<index_t>(); reduce_crow_indices_dim1_cuda_kernel<<<1, 1, 0, stream>>>(new_crow_indices_ptr, row_map_ptr, crow_indices_ptr, nrows); C10_CUDA_KERNEL_LAUNCH_CHECK(); index_t new_nnz = new_crow_indices[-1].item<index_t>(); new_col_indices.resize_(new_nnz); new_col_indices.fill_(index_t(0)); new_values.resize_(new_nnz); new_values_acc.resize_(new_nnz); scalar_t* values_ptr = values.data_ptr<scalar_t>(); acc_t* new_values_acc_ptr = new_values_acc.data_ptr<acc_t>(); reduce_sparse_csr_dim1_cuda_kernel<<<BLOCKS, THREADS, 0, stream>>>(new_values_acc_ptr, values_ptr, crow_indices_ptr, row_map_ptr, nrows, rop); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); copy_from_acc_buffer(new_values, new_values_acc); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {sparse.size(0), 1}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_dim01_cuda_template(const Tensor& sparse, ReductionOp rop) { auto ioptions = sparse.col_indices().options(); Tensor values = sparse.values(); auto numel = values.numel(); auto nnz = std::min<int64_t>(1, numel); auto result_dtype = at::isIntegralType(values.scalar_type(), /*includeBool=*/true) ? 
ScalarType::Long : values.scalar_type(); Tensor new_values, new_values_acc; if (numel > 0) { new_values = at::empty({1}, values.options().dtype(result_dtype)); new_values_acc = at::empty({1}, values.options()); auto iter = TensorIterator::reduce_op(new_values_acc, values); gpu_reduce_kernel<scalar_t, scalar_t>(iter, func_wrapper<scalar_t>(rop), rop.identity_cpu()); new_values.copy_(new_values_acc); } else { new_values = at::empty({}, values.options().dtype(result_dtype)); } Tensor new_col_indices = at::zeros({nnz}, ioptions); Tensor new_crow_indices = at::tensor(ArrayRef<int64_t>{0, nnz}, ioptions); return at::native::_sparse_csr_tensor_unsafe(new_crow_indices, new_col_indices, new_values, {1, std::min<int64_t>(1, sparse.size(1))}, new_values.scalar_type(), sparse.layout(), new_values.device()); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_cuda_template(const Tensor& sparse, std::vector<int64_t> dims, ReductionOp rop) { if (dims.size() == 1) { if (dims[0] == 0) { return reduce_sparse_csr_dim0_cuda_template<scalar_t>(sparse, rop); } else { TORCH_INTERNAL_ASSERT(dims[0] == 1); return reduce_sparse_csr_dim1_cuda_template<scalar_t>(sparse, rop); } } else if (dims.size() == 2) { TORCH_INTERNAL_ASSERT(((dims[0] == 0 && dims[1] == 1) || (dims[0] == 1 && dims[1] == 0))); return reduce_sparse_csr_dim01_cuda_template<scalar_t>(sparse, rop); } TORCH_INTERNAL_ASSERT(dims.size() == 0); // effective after gh-29137 has been resolved return sparse.clone(); } template <typename scalar_t, typename ReductionOp> Tensor reduce_sparse_csr_cuda_template(const Tensor& sparse, IntArrayRef dims_to_sum, bool keepdim, ReductionOp rop) { TORCH_INTERNAL_ASSERT(sparse.is_sparse_csr()); TORCH_CHECK(keepdim, "reduction operations on CSR tensors with keepdim=False is unsupported"); TORCH_INTERNAL_ASSERT(sparse.is_cuda()); const int64_t input_dim = sparse.dim(); TORCH_INTERNAL_ASSERT(input_dim == 2); auto dims = dims_to_sum.vec(); maybe_wrap_dims(dims, input_dim); if 
(dims.size() == 0) { // after gh-29137 is resolved, delete this if-block dims.emplace_back(0); dims.emplace_back(1); } return reduce_sparse_csr_cuda_template<scalar_t>(sparse, dims, rop); } template <typename scalar_t> struct ReductionAddOp { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a + b; } __device__ __forceinline__ scalar_t identity() const { return 0; } __forceinline__ scalar_t identity_cpu() const { return 0; } }; template <typename scalar_t> struct ReductionMulOp { __device__ __forceinline__ scalar_t operator()(const scalar_t a, const scalar_t b) const { return a * b; } __device__ __forceinline__ scalar_t identity() const { return 1; } __forceinline__ scalar_t identity_cpu() const { return 1; } }; } // namespace Tensor _sparse_csr_sum_cuda(const Tensor& input, IntArrayRef dims_to_sum, bool keepdim, c10::optional<ScalarType> dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = at::sparse_csr::to_type(input, dtype_); Tensor result; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( kHalf, kBFloat16, input_.scalar_type(), "_sparse_csr_sum_cuda", [&] { // Set `is_cuda` = `true` in acc_type in CPU backend. Because the accumulate type // of float should be float in current scenario. In CUDA, float is the accumulate type // of float, while in CPU, double is the accumulate type of float. 
using acc_t = at::acc_type<scalar_t, true>; result = reduce_sparse_csr_cuda_template<scalar_t>( input_, dims_to_sum, keepdim, ReductionAddOp<acc_t>()); }); return result; } Tensor _sparse_csr_prod_cuda(const Tensor& input, IntArrayRef dims_to_reduce, bool keepdim, c10::optional<ScalarType> dtype) { ScalarType dtype_ = dtype.value_or(input.scalar_type()); Tensor input_ = input.to(dtype_); Tensor result; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( kHalf, kBFloat16, input_.scalar_type(), "_sparse_csr_prod_cuda", [&] { result = reduce_sparse_csr_cuda_template<scalar_t>(input_, dims_to_reduce, keepdim, ReductionMulOp<scalar_t>()); }); return result; } } // namespace native } // namespace at
ea5ab97c8580ac8c3d5a95084a39425aef715a4d.hip
// !!! This is a file automatically generated by hipify!!! /* Processamento de Imagens na GPU Rafael Cardoso da Silva 21048012 Segmentacao de Imagem com o Algoritmo de Watershed Implementacao da Transformada de Distancia em GPU com CUDA (utilizando a Global e a Shared Memory) */ #include <iostream> #include <stdio.h> #include <cstring> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "PBM1d.cpp" #include "PICUDA.cu" #include "TEMPO.cpp" #define BLOCK_SIZE 16 #define RAIO 1 using namespace std; void chamaKernelTD_global(PBM1d* in, PBM1d* out, int N, size_t sizeMat); __global__ void vet_td_gpu_g_kernel(int* IN, int* OUT); void chamaKernelTD_shared(PBM1d* in, PBM1d* out, int N, size_t sizeMat); __global__ void vet_td_gpu_s_kernel(int* IN, int* OUT); void PICUDA::TD2D_multGPU(PBM1d* img) { // Nas duas gpu int devicesCount, deviceIndex; hipGetDeviceCount(&devicesCount); for( deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) { hipDeviceProp_t deviceProperties; hipGetDeviceProperties(&deviceProperties, deviceIndex); hipSetDevice(deviceIndex); printf("%s \n", deviceProperties.name ); // GPU global memory cout << " GLOBAL = "; TEMPO_tic(); PBM1d* imgG = PICUDA::TD2D_GLOBAL(img); TEMPO_toc_TD(); delete imgG; // GPU shared memory cout << " SHARED = "; TEMPO_tic(); PBM1d* imgS = PICUDA::TD2D_SHARED(img); TEMPO_toc_TD(); delete imgS; } cout << endl; } /// =============================== GLOBAL MEMORY /// TRANSFORMADA DA DISTANCIA em 2D com GLOBAL MEMORY PBM1d* PICUDA::TD2D_GLOBAL(PBM1d* img) { int i; int HEIGTH = img->getHeight(); int WIDTH = img->getWidth(); int N = img->getTam(); PBM1d* in = new PBM1d(); in->copyOf(img); PBM1d* out = new PBM1d(); out->zerado( HEIGTH, WIDTH); int ALTO = HEIGTH * WIDTH; for(i = 0; i < ALTO; i++) { in->set1(i, in->get1(i)*ALTO); } // tamanho de bytes da img (ela eh mod 16) size_t sizeMat = (sizeof (int) * in->getTam() ); /// chama Horizontalmente chamaKernelTD_global(in, out, N, sizeMat); // copia OUT -> IN 
memcpy(in->getMat(), out->getMat(), sizeMat); // faz a transposta da img in->transpose(); /// chama Verticalmente chamaKernelTD_global(in, out, N, sizeMat); // faz a transposta da img out->transpose(); delete in; return out; } void chamaKernelTD_global(PBM1d* in, PBM1d* out, int N, size_t sizeMat) { int ite = 0; while(1) { // Aloca o IN e o OUT na GPU int* d_IN; int* d_OUT; hipMalloc((void**)&d_IN, sizeMat); hipMalloc((void**)&d_OUT, sizeMat); // Copia o IN da CPU para a GPU hipMemcpy(d_IN, (void*)in->getMat(), sizeMat, hipMemcpyHostToDevice); // Lanca Kernel dim3 dimBlock(BLOCK_SIZE, 1); dim3 dimGrid( N / dimBlock.x, 1); hipLaunchKernelGGL(( vet_td_gpu_g_kernel) , dim3(N / BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_IN + RAIO, d_OUT + RAIO); // Copia o OUT da GPU para a CPU hipMemcpy((void*)out->getMat(), d_OUT, sizeMat, hipMemcpyDeviceToHost); // Desaloca da GPU hipFree(d_IN); hipFree(d_OUT); // verifica se nao houve mudanca ( se IN==OUT entao PARA) if( PBM1d::imgIGUAL(in, out) ) break; // copia OUT -> IN memcpy(in->getMat(), out->getMat(), sizeMat); if(ite > in->getWidth() + 1) { break; } } } __global__ void vet_td_gpu_g_kernel(int* IN, int* OUT) { int id = threadIdx.x + blockIdx.x * blockDim.x; // aplica int menor = INT_MAX; if(IN[id] == 0) { // se o analizado eh 0 OUT[id] = 0; } else { // escolhe o menor de seus 2 vizinhos for (int vizinho = -RAIO ; vizinho <= RAIO ; vizinho++) menor = min(menor, IN[id + vizinho]); // Salva o menor + 1 if(menor >= IN[id] ) { // (sem estourar o limite, no cazo de dimensao grande) OUT[id] = IN[id]; } else { OUT[id] = menor + 1; } } } /// =============================== SHARED MEMORY /// TRANSFORMADA DA DISTANCIA em 2D com SHARED MEMORY PBM1d* PICUDA::TD2D_SHARED(PBM1d* img) { int i; int HEIGTH = img->getHeight(); int WIDTH = img->getWidth(); int N = img->getTam(); PBM1d* in = new PBM1d(); in->copyOf(img); PBM1d* out = new PBM1d(); out->zerado( HEIGTH, WIDTH); int ALTO = HEIGTH * WIDTH; for(i = 0; i < ALTO; i++) { in->set1(i, 
in->get1(i)*ALTO); } // tamanho de bytes da img (ela eh mod 16) size_t sizeMat = (sizeof (int) * in->getTam() ); /// chama Horizontalmente chamaKernelTD_shared(in, out, N, sizeMat); // copia OUT -> IN memcpy(in->getMat(), out->getMat(), sizeMat); // faz a transposta da img in->transpose(); /// chama Verticalmente chamaKernelTD_shared(in, out, N, sizeMat); // faz a transposta da img out->transpose(); delete in; return out; } void chamaKernelTD_shared(PBM1d* in, PBM1d* out, int N, size_t sizeMat) { int ite = 0; while(1) { // Aloca o IN e o OUT na GPU int* d_IN; int* d_OUT; hipMalloc((void**)&d_IN, sizeMat); hipMalloc((void**)&d_OUT, sizeMat); // Copia o IN da CPU para a GPU hipMemcpy(d_IN, (void*)in->getMat(), sizeMat, hipMemcpyHostToDevice); // Lanca Kernel dim3 dimBlock(BLOCK_SIZE, 1); dim3 dimGrid( N / dimBlock.x, 1); hipLaunchKernelGGL(( vet_td_gpu_s_kernel) , dim3(N / BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, d_IN + RAIO, d_OUT + RAIO); // Copia o OUT da GPU para a CPU hipMemcpy((void*)out->getMat(), d_OUT, sizeMat, hipMemcpyDeviceToHost); // Desaloca da GPU hipFree(d_IN); hipFree(d_OUT); // verifica se nao houve mudanca ( se IN==OUT entao PARA) if( PBM1d::imgIGUAL(in, out) ) break; // copia OUT -> IN memcpy(in->getMat(), out->getMat(), sizeMat); if(ite > in->getWidth() + 1) { break; } } } __global__ void vet_td_gpu_s_kernel(int* IN, int* OUT) { __shared__ int temp[BLOCK_SIZE + 2 * RAIO]; int Gid = threadIdx.x + blockIdx.x * blockDim.x; int Lid = threadIdx.x + RAIO; // Adiciona os elementos na memoria Shared temp[Lid] = IN[Gid]; if (threadIdx.x < RAIO) { temp[Lid - RAIO] = IN[Gid - RAIO]; temp[Lid + BLOCK_SIZE] = IN[Gid + BLOCK_SIZE]; } // Sincroniza pra garantir q todos so dados vao estar disponivel __syncthreads(); // aplica int menor = INT_MAX; if(temp[Lid] == 0) { // se o analizado eh 0 OUT[Gid] = 0; } else { // escolhe o menor de seus 2 vizinhos for (int vizinho = -RAIO ; vizinho <= RAIO ; vizinho++) menor = min(menor, temp[Lid + vizinho]); // Salva o menor + 1 
if(menor >= temp[Lid] ) { // (sem estourar o limite, no cazo de dimensao grande) OUT[Gid] = temp[Lid]; } else { OUT[Gid] = menor + 1; } } }
ea5ab97c8580ac8c3d5a95084a39425aef715a4d.cu
/* Processamento de Imagens na GPU Rafael Cardoso da Silva 21048012 Segmentacao de Imagem com o Algoritmo de Watershed Implementacao da Transformada de Distancia em GPU com CUDA (utilizando a Global e a Shared Memory) */ #include <iostream> #include <stdio.h> #include <cstring> #include <cuda_runtime_api.h> #include <cuda.h> #include "PBM1d.cpp" #include "PICUDA.cu" #include "TEMPO.cpp" #define BLOCK_SIZE 16 #define RAIO 1 using namespace std; void chamaKernelTD_global(PBM1d* in, PBM1d* out, int N, size_t sizeMat); __global__ void vet_td_gpu_g_kernel(int* IN, int* OUT); void chamaKernelTD_shared(PBM1d* in, PBM1d* out, int N, size_t sizeMat); __global__ void vet_td_gpu_s_kernel(int* IN, int* OUT); void PICUDA::TD2D_multGPU(PBM1d* img) { // Nas duas gpu int devicesCount, deviceIndex; cudaGetDeviceCount(&devicesCount); for( deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) { cudaDeviceProp deviceProperties; cudaGetDeviceProperties(&deviceProperties, deviceIndex); cudaSetDevice(deviceIndex); printf("%s \n", deviceProperties.name ); // GPU global memory cout << " GLOBAL = "; TEMPO_tic(); PBM1d* imgG = PICUDA::TD2D_GLOBAL(img); TEMPO_toc_TD(); delete imgG; // GPU shared memory cout << " SHARED = "; TEMPO_tic(); PBM1d* imgS = PICUDA::TD2D_SHARED(img); TEMPO_toc_TD(); delete imgS; } cout << endl; } /// =============================== GLOBAL MEMORY /// TRANSFORMADA DA DISTANCIA em 2D com GLOBAL MEMORY PBM1d* PICUDA::TD2D_GLOBAL(PBM1d* img) { int i; int HEIGTH = img->getHeight(); int WIDTH = img->getWidth(); int N = img->getTam(); PBM1d* in = new PBM1d(); in->copyOf(img); PBM1d* out = new PBM1d(); out->zerado( HEIGTH, WIDTH); int ALTO = HEIGTH * WIDTH; for(i = 0; i < ALTO; i++) { in->set1(i, in->get1(i)*ALTO); } // tamanho de bytes da img (ela eh mod 16) size_t sizeMat = (sizeof (int) * in->getTam() ); /// chama Horizontalmente chamaKernelTD_global(in, out, N, sizeMat); // copia OUT -> IN memcpy(in->getMat(), out->getMat(), sizeMat); // faz a transposta da img 
in->transpose(); /// chama Verticalmente chamaKernelTD_global(in, out, N, sizeMat); // faz a transposta da img out->transpose(); delete in; return out; } void chamaKernelTD_global(PBM1d* in, PBM1d* out, int N, size_t sizeMat) { int ite = 0; while(1) { // Aloca o IN e o OUT na GPU int* d_IN; int* d_OUT; cudaMalloc((void**)&d_IN, sizeMat); cudaMalloc((void**)&d_OUT, sizeMat); // Copia o IN da CPU para a GPU cudaMemcpy(d_IN, (void*)in->getMat(), sizeMat, cudaMemcpyHostToDevice); // Lanca Kernel dim3 dimBlock(BLOCK_SIZE, 1); dim3 dimGrid( N / dimBlock.x, 1); vet_td_gpu_g_kernel <<< N / BLOCK_SIZE, BLOCK_SIZE >>> (d_IN + RAIO, d_OUT + RAIO); // Copia o OUT da GPU para a CPU cudaMemcpy((void*)out->getMat(), d_OUT, sizeMat, cudaMemcpyDeviceToHost); // Desaloca da GPU cudaFree(d_IN); cudaFree(d_OUT); // verifica se nao houve mudanca ( se IN==OUT entao PARA) if( PBM1d::imgIGUAL(in, out) ) break; // copia OUT -> IN memcpy(in->getMat(), out->getMat(), sizeMat); if(ite > in->getWidth() + 1) { break; } } } __global__ void vet_td_gpu_g_kernel(int* IN, int* OUT) { int id = threadIdx.x + blockIdx.x * blockDim.x; // aplica int menor = INT_MAX; if(IN[id] == 0) { // se o analizado eh 0 OUT[id] = 0; } else { // escolhe o menor de seus 2 vizinhos for (int vizinho = -RAIO ; vizinho <= RAIO ; vizinho++) menor = min(menor, IN[id + vizinho]); // Salva o menor + 1 if(menor >= IN[id] ) { // (sem estourar o limite, no cazo de dimensao grande) OUT[id] = IN[id]; } else { OUT[id] = menor + 1; } } } /// =============================== SHARED MEMORY /// TRANSFORMADA DA DISTANCIA em 2D com SHARED MEMORY PBM1d* PICUDA::TD2D_SHARED(PBM1d* img) { int i; int HEIGTH = img->getHeight(); int WIDTH = img->getWidth(); int N = img->getTam(); PBM1d* in = new PBM1d(); in->copyOf(img); PBM1d* out = new PBM1d(); out->zerado( HEIGTH, WIDTH); int ALTO = HEIGTH * WIDTH; for(i = 0; i < ALTO; i++) { in->set1(i, in->get1(i)*ALTO); } // tamanho de bytes da img (ela eh mod 16) size_t sizeMat = (sizeof (int) * 
in->getTam() ); /// chama Horizontalmente chamaKernelTD_shared(in, out, N, sizeMat); // copia OUT -> IN memcpy(in->getMat(), out->getMat(), sizeMat); // faz a transposta da img in->transpose(); /// chama Verticalmente chamaKernelTD_shared(in, out, N, sizeMat); // faz a transposta da img out->transpose(); delete in; return out; } void chamaKernelTD_shared(PBM1d* in, PBM1d* out, int N, size_t sizeMat) { int ite = 0; while(1) { // Aloca o IN e o OUT na GPU int* d_IN; int* d_OUT; cudaMalloc((void**)&d_IN, sizeMat); cudaMalloc((void**)&d_OUT, sizeMat); // Copia o IN da CPU para a GPU cudaMemcpy(d_IN, (void*)in->getMat(), sizeMat, cudaMemcpyHostToDevice); // Lanca Kernel dim3 dimBlock(BLOCK_SIZE, 1); dim3 dimGrid( N / dimBlock.x, 1); vet_td_gpu_s_kernel <<< N / BLOCK_SIZE, BLOCK_SIZE >>> (d_IN + RAIO, d_OUT + RAIO); // Copia o OUT da GPU para a CPU cudaMemcpy((void*)out->getMat(), d_OUT, sizeMat, cudaMemcpyDeviceToHost); // Desaloca da GPU cudaFree(d_IN); cudaFree(d_OUT); // verifica se nao houve mudanca ( se IN==OUT entao PARA) if( PBM1d::imgIGUAL(in, out) ) break; // copia OUT -> IN memcpy(in->getMat(), out->getMat(), sizeMat); if(ite > in->getWidth() + 1) { break; } } } __global__ void vet_td_gpu_s_kernel(int* IN, int* OUT) { __shared__ int temp[BLOCK_SIZE + 2 * RAIO]; int Gid = threadIdx.x + blockIdx.x * blockDim.x; int Lid = threadIdx.x + RAIO; // Adiciona os elementos na memoria Shared temp[Lid] = IN[Gid]; if (threadIdx.x < RAIO) { temp[Lid - RAIO] = IN[Gid - RAIO]; temp[Lid + BLOCK_SIZE] = IN[Gid + BLOCK_SIZE]; } // Sincroniza pra garantir q todos so dados vao estar disponivel __syncthreads(); // aplica int menor = INT_MAX; if(temp[Lid] == 0) { // se o analizado eh 0 OUT[Gid] = 0; } else { // escolhe o menor de seus 2 vizinhos for (int vizinho = -RAIO ; vizinho <= RAIO ; vizinho++) menor = min(menor, temp[Lid + vizinho]); // Salva o menor + 1 if(menor >= temp[Lid] ) { // (sem estourar o limite, no cazo de dimensao grande) OUT[Gid] = temp[Lid]; } else { OUT[Gid] = 
menor + 1; } } }
4158f919cdfe839f72e2e79ead7fc668fcb6cc45.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void atan2_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::atan2(a, b); }); }); } void bitwise_xor_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { // Boolean type does not work with ^ (bitwise XOR) in C++. bitwise_xor wraps this operation for both Boolean and // integral types. gpu_kernel_with_scalars( iter, []GPU_LAMBDA(bool a, bool b) { return a != b; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_xor_cuda", [&]() { gpu_kernel_with_scalars( iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a ^ b; }); }); } } void logical_xor_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_xor_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return bool(a) != bool(b); }); }); } void smooth_l1_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { auto z = fabs(a - b); return z < scalar_t(1.) ? scalar_t(0.5) * z * z : z - scalar_t(0.5); }); }); } void sigmoid_backward_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sigmoid_backward_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * (scalar_t(1.) 
- b) * b; }); }); } void mse_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "mse_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { auto diff = a - b; return diff * diff; }); }); } REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda); REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda); REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda); REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda); REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda); REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda); }} // namespace at::native
4158f919cdfe839f72e2e79ead7fc668fcb6cc45.cu
#include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/TensorIterator.h> #include <ATen/native/BinaryOps.h> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { void atan2_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "atan2_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return ::atan2(a, b); }); }); } void bitwise_xor_kernel_cuda(TensorIterator& iter) { if (iter.dtype() == ScalarType::Bool) { // Boolean type does not work with ^ (bitwise XOR) in C++. bitwise_xor wraps this operation for both Boolean and // integral types. gpu_kernel_with_scalars( iter, []GPU_LAMBDA(bool a, bool b) { return a != b; }); } else { AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "bitwise_xor_cuda", [&]() { gpu_kernel_with_scalars( iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a ^ b; }); }); } } void logical_xor_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.common_dtype(), "logical_xor_cuda", [&]() { gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> bool { return bool(a) != bool(b); }); }); } void smooth_l1_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_ALL_TYPES_AND(kHalf, iter.dtype(), "smooth_l1_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { auto z = fabs(a - b); return z < scalar_t(1.) ? scalar_t(0.5) * z * z : z - scalar_t(0.5); }); }); } void sigmoid_backward_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "sigmoid_backward_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { return a * (scalar_t(1.) 
- b) * b; }); }); } void mse_kernel_cuda(TensorIterator& iter) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "mse_cuda", [&]() { gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t { auto diff = a - b; return diff * diff; }); }); } REGISTER_DISPATCH(atan2_stub, &atan2_kernel_cuda); REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda); REGISTER_DISPATCH(logical_xor_stub, &logical_xor_kernel_cuda); REGISTER_DISPATCH(smooth_l1_stub, &smooth_l1_kernel_cuda); REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda); REGISTER_DISPATCH(mse_stub, &mse_kernel_cuda); }} // namespace at::native
11297da06c7649abd8e66ee1d4ab888fe1cd6b92.hip
// !!! This is a file automatically generated by hipify!!! #include <ccut/utils.cuh> #include "nvidia_utilities.cuh" #include <utilities.h> #include <log.h> #include <Assertion.h> namespace { //number of blocks in grid if have to use 2D grid const size_t BLOCK_IN_X_DIV = 4; //I hope it will be about 16000 } namespace Cuda { void Init(){ //TODO fix checkCudaErrors(hipSetDevice(0)); }; size_t RoundToBunch(size_t x, size_t bunch) { return x%bunch == 0 ? x : (x/bunch+1)*bunch; } void Sync(){ Assert(hipSuccess == hipDeviceSynchronize()); } } dim3 CudaParams::m_maxBlocks = 1; dim3 CudaParams::m_maxThreads = 1; CudaParams::CudaParams(int dataSize, size_t THREADS, bool yesIknowWhatImDoing) { static bool init = CudaParams::init(); if(dataSize < 0) { LogError("Wrong data size " << dataSize); } //For release - when Asserts disappear nThreads = 1; nBlocks = 1; //------- //THREADS //------- if(THREADS > m_maxThreads.x) { LogCritical("Wrong Threads nb"); Assert(0); } nThreads = THREADS; //------ //BLOCKS //------ size_t roundedN = Cuda::RoundToBunch(dataSize,THREADS); size_t _nBlocks = roundedN / nThreads.x; //1D; if(_nBlocks <=m_maxBlocks.x) { nBlocks.x = _nBlocks; return; } if (!yesIknowWhatImDoing) { LogCritical("ERROR: Problem with data size detected"); Assert(0); } //2D nBlocks.x = m_maxBlocks.x / BLOCK_IN_X_DIV; nBlocks.y = _nBlocks % nBlocks.x == 0 ? _nBlocks / nBlocks.x : (_nBlocks / nBlocks.x)+1; if (nBlocks.y <= m_maxBlocks.y) { return; } //3D //TODO implement me LogCritical("ERROR: Problem with data size detected"); Assert(0); } bool CudaParams::init() { hipDeviceProp_t deviceProp; int dev=0; hipGetDeviceProperties(&deviceProp, dev); m_maxBlocks = dim3(deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); m_maxThreads = dim3(deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); return true; }
11297da06c7649abd8e66ee1d4ab888fe1cd6b92.cu
#include <ccut/utils.cuh> #include "nvidia_utilities.cuh" #include <utilities.h> #include <log.h> #include <Assertion.h> namespace { //number of blocks in grid if have to use 2D grid const size_t BLOCK_IN_X_DIV = 4; //I hope it will be about 16000 } namespace Cuda { void Init(){ //TODO fix checkCudaErrors(cudaSetDevice(0)); }; size_t RoundToBunch(size_t x, size_t bunch) { return x%bunch == 0 ? x : (x/bunch+1)*bunch; } void Sync(){ Assert(cudaSuccess == cudaDeviceSynchronize()); } } dim3 CudaParams::m_maxBlocks = 1; dim3 CudaParams::m_maxThreads = 1; CudaParams::CudaParams(int dataSize, size_t THREADS, bool yesIknowWhatImDoing) { static bool init = CudaParams::init(); if(dataSize < 0) { LogError("Wrong data size " << dataSize); } //For release - when Asserts disappear nThreads = 1; nBlocks = 1; //------- //THREADS //------- if(THREADS > m_maxThreads.x) { LogCritical("Wrong Threads nb"); Assert(0); } nThreads = THREADS; //------ //BLOCKS //------ size_t roundedN = Cuda::RoundToBunch(dataSize,THREADS); size_t _nBlocks = roundedN / nThreads.x; //1D; if(_nBlocks <=m_maxBlocks.x) { nBlocks.x = _nBlocks; return; } if (!yesIknowWhatImDoing) { LogCritical("ERROR: Problem with data size detected"); Assert(0); } //2D nBlocks.x = m_maxBlocks.x / BLOCK_IN_X_DIV; nBlocks.y = _nBlocks % nBlocks.x == 0 ? _nBlocks / nBlocks.x : (_nBlocks / nBlocks.x)+1; if (nBlocks.y <= m_maxBlocks.y) { return; } //3D //TODO implement me LogCritical("ERROR: Problem with data size detected"); Assert(0); } bool CudaParams::init() { cudaDeviceProp deviceProp; int dev=0; cudaGetDeviceProperties(&deviceProp, dev); m_maxBlocks = dim3(deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); m_maxThreads = dim3(deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); return true; }
d3fda2188237c46c9b31b541e422d657474fbe00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // MP Scan // Given a list (lst) of length n // Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]} #include <wb.h> #define BLOCK_SIZE 512 //@@ You can change this #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while(0) __device__ void reduction(int threadPos, float * input){ // Reduction simulates a binary tree (balanced, actually) hence the 2 by 2 mult. // Until block size because it can be a exact multiplier or not for(int stride = 1; stride <= BLOCK_SIZE; stride *= 2){ int i = (threadPos+1) * stride * 2 - 1; if( i < 2 * BLOCK_SIZE ){ // ensure boundary condition input[i] += input[i-stride]; } __syncthreads(); } } __device__ void reverse(int threadPos, float * input){ //Runs the binary tree from the bottom to the top for(int stride = BLOCK_SIZE/2; stride > 0; stride /= 2){ __syncthreads(); int i = (threadPos+1) * stride * 2 - 1; if( i + stride < 2 * BLOCK_SIZE ){ input[i+stride] += input[i]; } } } __global__ void scan(float * input, float * output, float * interm, int len) { //@@ Modify the body of this function to complete the functionality of //@@ the scan on the device //@@ You may need multiple kernel calls; write your kernels before this //@@ function and call them from here __shared__ float sh_input[2*BLOCK_SIZE]; int i = threadIdx.x + blockDim.x * blockIdx.x; if( i + threadIdx.x < len ){ sh_input[threadIdx.x*2] = input[i + threadIdx.x]; }else{ sh_input[threadIdx.x*2] = 0; } if( i + threadIdx.x + 1 < len ){ sh_input[threadIdx.x*2+1] = input[i + threadIdx.x + 1]; }else{ sh_input[threadIdx.x*2+1] = 0; } __syncthreads(); reduction(threadIdx.x, sh_input); reverse(threadIdx.x, sh_input); __syncthreads(); if( i < len ) output[i] = sh_input[threadIdx.x]; //Store the last number //Maybe 
this could lead to a control divergence, not clear if( interm != NULL && threadIdx.x == blockDim.x - 1 ) interm[blockIdx.x] = sh_input[threadIdx.x]; } __global__ void sumByBlocks(float * sum, float* values, int length){ int i = threadIdx.x + blockDim.x * (blockIdx.x+1); if(i < length) sum[i] += values[blockIdx.x]; } int main(int argc, char ** argv) { wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numElements; // number of elements in the list float * intermScan; float * intermScanOutput; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements); hostOutput = (float*) malloc(numElements * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The number of input elements in the input is ", numElements); wbTime_start(GPU, "Allocating GPU memory."); wbCheck(hipMalloc((void**)&deviceInput, numElements*sizeof(float))); wbCheck(hipMalloc((void**)&deviceOutput, numElements*sizeof(float))); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Clearing output memory."); wbCheck(hipMemset(deviceOutput, 0, numElements*sizeof(float))); wbTime_stop(GPU, "Clearing output memory."); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(hipMemcpy(deviceInput, hostInput, numElements*sizeof(float), hipMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here int quantity_blocks = (numElements-1)/BLOCK_SIZE + 1; wbCheck(hipMalloc((void**)&intermScan, quantity_blocks*sizeof(float))); wbCheck(hipMalloc((void**)&intermScanOutput, quantity_blocks*sizeof(float))); dim3 DimGrid(quantity_blocks, 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Modify this to complete the functionality of the scan //@@ on the 
deivce hipLaunchKernelGGL(( scan), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceInput,deviceOutput,intermScan,numElements); hipLaunchKernelGGL(( scan), dim3(DimGrid),dim3(DimBlock), 0, 0, intermScan,intermScanOutput, NULL, quantity_blocks); DimGrid.x -= 1; hipLaunchKernelGGL(( sumByBlocks), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceOutput,intermScanOutput,numElements); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), hipMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); hipFree(deviceInput); hipFree(deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, numElements); free(hostInput); free(hostOutput); return 0; }
d3fda2188237c46c9b31b541e422d657474fbe00.cu
// MP Scan // Given a list (lst) of length n // Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]} #include <wb.h> #define BLOCK_SIZE 512 //@@ You can change this #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while(0) __device__ void reduction(int threadPos, float * input){ // Reduction simulates a binary tree (balanced, actually) hence the 2 by 2 mult. // Until block size because it can be a exact multiplier or not for(int stride = 1; stride <= BLOCK_SIZE; stride *= 2){ int i = (threadPos+1) * stride * 2 - 1; if( i < 2 * BLOCK_SIZE ){ // ensure boundary condition input[i] += input[i-stride]; } __syncthreads(); } } __device__ void reverse(int threadPos, float * input){ //Runs the binary tree from the bottom to the top for(int stride = BLOCK_SIZE/2; stride > 0; stride /= 2){ __syncthreads(); int i = (threadPos+1) * stride * 2 - 1; if( i + stride < 2 * BLOCK_SIZE ){ input[i+stride] += input[i]; } } } __global__ void scan(float * input, float * output, float * interm, int len) { //@@ Modify the body of this function to complete the functionality of //@@ the scan on the device //@@ You may need multiple kernel calls; write your kernels before this //@@ function and call them from here __shared__ float sh_input[2*BLOCK_SIZE]; int i = threadIdx.x + blockDim.x * blockIdx.x; if( i + threadIdx.x < len ){ sh_input[threadIdx.x*2] = input[i + threadIdx.x]; }else{ sh_input[threadIdx.x*2] = 0; } if( i + threadIdx.x + 1 < len ){ sh_input[threadIdx.x*2+1] = input[i + threadIdx.x + 1]; }else{ sh_input[threadIdx.x*2+1] = 0; } __syncthreads(); reduction(threadIdx.x, sh_input); reverse(threadIdx.x, sh_input); __syncthreads(); if( i < len ) output[i] = sh_input[threadIdx.x]; //Store the last number //Maybe this could lead to a control divergence, not clear if( interm != NULL && threadIdx.x 
== blockDim.x - 1 ) interm[blockIdx.x] = sh_input[threadIdx.x]; } __global__ void sumByBlocks(float * sum, float* values, int length){ int i = threadIdx.x + blockDim.x * (blockIdx.x+1); if(i < length) sum[i] += values[blockIdx.x]; } int main(int argc, char ** argv) { wbArg_t args; float * hostInput; // The input 1D list float * hostOutput; // The output list float * deviceInput; float * deviceOutput; int numElements; // number of elements in the list float * intermScan; float * intermScanOutput; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements); hostOutput = (float*) malloc(numElements * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The number of input elements in the input is ", numElements); wbTime_start(GPU, "Allocating GPU memory."); wbCheck(cudaMalloc((void**)&deviceInput, numElements*sizeof(float))); wbCheck(cudaMalloc((void**)&deviceOutput, numElements*sizeof(float))); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Clearing output memory."); wbCheck(cudaMemset(deviceOutput, 0, numElements*sizeof(float))); wbTime_stop(GPU, "Clearing output memory."); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(cudaMemcpy(deviceInput, hostInput, numElements*sizeof(float), cudaMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here int quantity_blocks = (numElements-1)/BLOCK_SIZE + 1; wbCheck(cudaMalloc((void**)&intermScan, quantity_blocks*sizeof(float))); wbCheck(cudaMalloc((void**)&intermScanOutput, quantity_blocks*sizeof(float))); dim3 DimGrid(quantity_blocks, 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Modify this to complete the functionality of the scan //@@ on the deivce 
scan<<<DimGrid,DimBlock>>>(deviceInput,deviceOutput,intermScan,numElements); scan<<<DimGrid,DimBlock>>>(intermScan,intermScanOutput, NULL, quantity_blocks); DimGrid.x -= 1; sumByBlocks<<<DimGrid,DimBlock>>>(deviceOutput,intermScanOutput,numElements); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), cudaMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); cudaFree(deviceInput); cudaFree(deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostOutput, numElements); free(hostInput); free(hostOutput); return 0; }
2da929aec9f809990ac76b73420b9ddfd5358742.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : last.cu Author : christopher Version : Copyright : @ copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <sys/time.h> #include <stdio.h> #define threads_per_block 256 #define threads_per_warp 32 //#include "/home/chris/Downloads/cuPrintf.cu" //#include "/home/chris/Downloads/cuPrintf.cuh" __device__ void hadamard_product_small(double* sh_a, double* sh_b, int multiplier, int rows) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_b[i] = sh_b[i] * sh_a[i] * (i < rows); } // result is stored in sh_b vector\ //done } __device__ void array_sum_small(double* sha, double& result, int multiplier, int rows, int start) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = threads_per_warp; i < threads_per_block; i = i * 2) { // switch 1 : even warps add their's neighbors contents switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // add the "more next vector" sha[thread_id] = sha[thread_id] + sha[i + thread_id] * (start + thread_id + i < rows); break; default: // thread_id % i == odd // do nothing break; } __syncthreads(); // switch2 : odd warps clean up their content switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // do nothing break; default: // thread_id % i == odd // clean up sha[thread_id] = 0; //__syncthreads(); break; } __syncthreads(); } // loop ended, sha[0:threads_per_warp] got the sum if (thread_id == 0) { for (int i = 0; i < threads_per_warp; i++) { result = result + sha[i]; } } } __global__ void 
array_mult(double* matrix, double* vector, double* result, int rows, int cols_per_block, int multiplier, double* sigm) { double* a = &matrix[blockIdx.x * rows * cols_per_block]; //result[0] = 0; extern __shared__ double shared[]; double* sh_m = shared; double* sh_v = &sh_m[threads_per_block * multiplier]; double* res = &sh_v[threads_per_block * multiplier]; // thread_id*multiplier ews thread_id*multiplier+multiplier-1 int thread_id = threadIdx.x; for (int c = 0; c < cols_per_block; c++) { // for each col that every block must deal with , do the following : // load from global to shared mem for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_m[i] = a[i + c * rows] * (i < rows); } for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_v[i] = vector[i + c * rows] * (i < rows); } __syncthreads(); // find the hadamard product hadamard_product_small(sh_m, sh_v, multiplier, rows); __syncthreads(); // initiallize shared vector res with zeros for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { res[i] = 0; } __syncthreads(); for (int i = 0; i < multiplier; i++) { array_sum_small(&sh_v[i * threads_per_block], res[i], multiplier, rows, i * threads_per_block); } __syncthreads(); if (thread_id == 0) { for (int i = 1; i < multiplier; i++) { res[0] += res[i]; } result[blockIdx.x * cols_per_block + c] = res[0] * sigm[blockIdx.x * cols_per_block + c]; } } } using namespace std; double getRandom(int min, int max); double* matrix_vector_mull(int cols, int rows, double* matrix, double* vector); int get_threads_per_cols(int cols); int get_wSize_on_layer(int l, int* sizes); int get_dSize_on_layer(int l, int* sizes); double* hadamard_product(int size, double* a, double* b); void backpropagate(double** delta, double** sigm_derivative,double** w, int* sizeOfLayers, int numOfLayers) ; int main(void) { struct timeval t1, t2; double time, time_c, time_h; int num_of_layers = 4; int* sizes = new 
int[num_of_layers]; hipStream_t default_stream; hipStreamCreate(&default_stream); sizes[0] = 9000; sizes[1] = 90; sizes[2] = 90; sizes[3] = 10; // seirial arrays double** w = new double*[num_of_layers - 1]; double** delta = new double*[num_of_layers]; double** sigm_der = new double*[num_of_layers]; // cuda arrays double *w_c, *delta_c, *sigm_der_c; int w_length = 0, d_length = 0; w_length = get_wSize_on_layer(num_of_layers - 1, sizes); d_length = get_wSize_on_layer(num_of_layers, sizes); // gpu mem allocation hipMalloc((void**) &w_c, sizeof(double) * w_length); hipMalloc((void**) &delta_c, sizeof(double) * d_length); hipMalloc((void**) &sigm_der_c, sizeof(double) * d_length); // host mem allocation for (int i = 0; i < num_of_layers - 1; i++) { w[i] = new double[sizes[i] * sizes[i + 1]]; for (int j = 0; j < sizes[i] * sizes[i + 1]; j++) { w[i][j] = 1; } } for (int i = 0; i < num_of_layers; i++) { delta[i] = new double[sizes[i]]; for (int j = 0; j < sizes[i]; j++) { delta[i][j] = 1; } } for (int i = 0; i < num_of_layers; i++) { sigm_der[i] = new double[sizes[i]]; for (int j = 0; j < sizes[i]; j++) { sigm_der[i][j] = 0.5; } } // backpropagate requires only the delta[sizes[num_of_layers-1]] , so // we are not going to count the cudaMemcy's of the rest data, // simply because they can happen after cpu (host) updates the w's // and we got enough time from that point until we reach backpropagate function // suppose we do that way before we get close to backpropagation fucntion call // copy w in cuda for (int i = 0; i < num_of_layers - 1; i++) { hipMemcpyAsync(&w_c[get_wSize_on_layer(i, sizes)], w[i], sizeof(double) * sizes[i] * sizes[i + 1], hipMemcpyHostToDevice, default_stream); } // copy sigm_der in cuda for (int i = 0; i < num_of_layers; i++) { hipMemcpyAsync(&sigm_der_c[get_dSize_on_layer(i, sizes)], sigm_der[i], sizeof(double) * sizes[i], hipMemcpyHostToDevice, default_stream); } // copies done , wait for steam 0 (default) to compute all copies (as we said, we do 
not count them) hipStreamSynchronize(default_stream); // now we may procced to backpropagation algorithm int multiplier = 0; gettimeofday(&t1, 0); // step 1 : copy the delta of the last layer into gpu // cpu commands : delta[numOfLayers - 1] = d_L; hipMemcpyAsync(&delta_c[get_dSize_on_layer(num_of_layers - 1, sizes)], delta[num_of_layers - 1], sizeof(double) * sizes[num_of_layers - 1], hipMemcpyHostToDevice, default_stream); // step 2 int bl = 0; for (int i = num_of_layers - 2; i >= 0; i--) { // w_d = matrix_vector_mull(sizeOfLayers[i + 1], sizeOfLayers[i + 2], w[i], delta[i + 1]); if(i>0){ multiplier = get_threads_per_cols(sizes[i]);// multiplier = get_threads_per_cols(cols); bl = sizes[i]; }else{ multiplier = get_threads_per_cols(sizes[i+1]); bl = sizes[i+1]; } hipLaunchKernelGGL(( array_mult), dim3(bl), dim3(threads_per_block), sizeof(double) * (3 * threads_per_block * multiplier), default_stream, &w_c[get_wSize_on_layer(i, sizes)], &delta_c[get_dSize_on_layer(i + 1, sizes)], &delta_c[get_dSize_on_layer(i, sizes)], sizes[i + 1], 1, multiplier, &sigm_der_c[get_dSize_on_layer(i, sizes)]); // delta[i] = hadamard_product(sizeOfLayers[i + 1], w_d, sigm_derivative[i]); hipStreamSynchronize(default_stream); hipMemcpyAsync(delta[i], &delta_c[get_dSize_on_layer(i, sizes)], sizeof(double) * sizes[i], hipMemcpyDeviceToHost, default_stream); } // wait until the last copy is completed hipStreamSynchronize(default_stream); // done gettimeofday(&t2, 0); time = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; cout << "Parallel time is " << time << " millisec \n"; time_c = time; // retrieve data back to cpu memory for debbugin reasons cout<< "cuda results : \n"; for (int i = 0; i < 1; i++) { for (int j = 0; j < 3; j++) { cout<< delta[i][j] << " "; } } cout<< "\n"; // now the serial code gettimeofday(&t1, 0); backpropagate(delta, sigm_der,w,sizes,num_of_layers); gettimeofday(&t2, 0); time = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 
1000.0; cout << "serial time is " << time << " millisec \n"; time_h = time; cout<< "cpu results : \n"; for (int i = 0; i < 1; i++) { for (int j = 0; j < 3; j++) { cout<< delta[i][j] << " "; } } cout<< "\n"; cout << "accelaration is " << (time_h-time_c)*100 << " % \n"; cout << "SUCCESS epitelous"; return 0; } void backpropagate(double** delta, double** sigm_derivative, double** w, int* sizeOfLayers, int numOfLayers) { double* w_d; for (int i = numOfLayers - 2; i >= 0; i--) { w_d = matrix_vector_mull(sizeOfLayers[i], sizeOfLayers[i + 1], w[i],delta[i + 1]); delta[i] = hadamard_product(sizeOfLayers[i], w_d, sigm_derivative[i]); delete[] w_d; } } double* hadamard_product(int size,double* a, double* b) { // returns the datamard product for vectors a and b // (return a.*b in matlab) // size = length of arrays a and b double* result = new double[size]; for (int i = 0; i < size; i++) { result[i] = a[i] * b[i]; } return result; } double* matrix_vector_mull(int cols, int rows, double* matrix, double* vector) { // TESTED // returns "cols x 1" vector double* temp = NULL ; double* res = new double[cols]; for(int j=0; j<cols; j++){ temp = new double[rows] ; for(int i=0; i<rows; i++){ temp[i] = matrix[i*cols+j]; } temp = hadamard_product(rows,temp,vector); res[j] = 0; for(int i=0; i<rows; i++){ res[j] += temp[i]; } delete[] temp; } return res; } double getRandom(int min, int max) { return (((max - min) * ((double) rand() / (double) RAND_MAX) + min) * 100) / 100; } int get_threads_per_cols(int cols) { if (cols < threads_per_block) { return 1; } int res = floor(cols / (double) threads_per_block); if (cols / (double) threads_per_block - floor(cols / (double) threads_per_block)) { res++; } return res; } int get_wSize_on_layer(int l, int* sizes) { int res = 0; for (int i = 0; i < l; i++) { res += sizes[i] * sizes[i + 1]; } return res; } int get_dSize_on_layer(int l, int* sizes) { int res = 0; for (int i = 0; i < l; i++) { res += sizes[i]; } return res; }
2da929aec9f809990ac76b73420b9ddfd5358742.cu
/* ============================================================================ Name : last.cu Author : christopher Version : Copyright : @ copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <sys/time.h> #include <stdio.h> #define threads_per_block 256 #define threads_per_warp 32 //#include "/home/chris/Downloads/cuPrintf.cu" //#include "/home/chris/Downloads/cuPrintf.cuh" __device__ void hadamard_product_small(double* sh_a, double* sh_b, int multiplier, int rows) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_b[i] = sh_b[i] * sh_a[i] * (i < rows); } // result is stored in sh_b vector\ //done } __device__ void array_sum_small(double* sha, double& result, int multiplier, int rows, int start) { int thread_id = threadIdx.y * blockDim.x + threadIdx.x; // start the computations for (int i = threads_per_warp; i < threads_per_block; i = i * 2) { // switch 1 : even warps add their's neighbors contents switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // add the "more next vector" sha[thread_id] = sha[thread_id] + sha[i + thread_id] * (start + thread_id + i < rows); break; default: // thread_id % i == odd // do nothing break; } __syncthreads(); // switch2 : odd warps clean up their content switch ((int) floor(thread_id / (double) i) % 2) { case 0: // thread_id % i == even // do nothing break; default: // thread_id % i == odd // clean up sha[thread_id] = 0; //__syncthreads(); break; } __syncthreads(); } // loop ended, sha[0:threads_per_warp] got the sum if (thread_id == 0) { for (int i = 0; i < threads_per_warp; i++) { result = result + sha[i]; } } } __global__ void array_mult(double* matrix, double* vector, double* result, int rows, int cols_per_block, int multiplier, 
double* sigm) { double* a = &matrix[blockIdx.x * rows * cols_per_block]; //result[0] = 0; extern __shared__ double shared[]; double* sh_m = shared; double* sh_v = &sh_m[threads_per_block * multiplier]; double* res = &sh_v[threads_per_block * multiplier]; // thread_id*multiplier ews thread_id*multiplier+multiplier-1 int thread_id = threadIdx.x; for (int c = 0; c < cols_per_block; c++) { // for each col that every block must deal with , do the following : // load from global to shared mem for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_m[i] = a[i + c * rows] * (i < rows); } for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { sh_v[i] = vector[i + c * rows] * (i < rows); } __syncthreads(); // find the hadamard product hadamard_product_small(sh_m, sh_v, multiplier, rows); __syncthreads(); // initiallize shared vector res with zeros for (int i = thread_id * multiplier; i < thread_id * multiplier + multiplier; i++) { res[i] = 0; } __syncthreads(); for (int i = 0; i < multiplier; i++) { array_sum_small(&sh_v[i * threads_per_block], res[i], multiplier, rows, i * threads_per_block); } __syncthreads(); if (thread_id == 0) { for (int i = 1; i < multiplier; i++) { res[0] += res[i]; } result[blockIdx.x * cols_per_block + c] = res[0] * sigm[blockIdx.x * cols_per_block + c]; } } } using namespace std; double getRandom(int min, int max); double* matrix_vector_mull(int cols, int rows, double* matrix, double* vector); int get_threads_per_cols(int cols); int get_wSize_on_layer(int l, int* sizes); int get_dSize_on_layer(int l, int* sizes); double* hadamard_product(int size, double* a, double* b); void backpropagate(double** delta, double** sigm_derivative,double** w, int* sizeOfLayers, int numOfLayers) ; int main(void) { struct timeval t1, t2; double time, time_c, time_h; int num_of_layers = 4; int* sizes = new int[num_of_layers]; cudaStream_t default_stream; cudaStreamCreate(&default_stream); sizes[0] = 9000; 
sizes[1] = 90; sizes[2] = 90; sizes[3] = 10; // seirial arrays double** w = new double*[num_of_layers - 1]; double** delta = new double*[num_of_layers]; double** sigm_der = new double*[num_of_layers]; // cuda arrays double *w_c, *delta_c, *sigm_der_c; int w_length = 0, d_length = 0; w_length = get_wSize_on_layer(num_of_layers - 1, sizes); d_length = get_wSize_on_layer(num_of_layers, sizes); // gpu mem allocation cudaMalloc((void**) &w_c, sizeof(double) * w_length); cudaMalloc((void**) &delta_c, sizeof(double) * d_length); cudaMalloc((void**) &sigm_der_c, sizeof(double) * d_length); // host mem allocation for (int i = 0; i < num_of_layers - 1; i++) { w[i] = new double[sizes[i] * sizes[i + 1]]; for (int j = 0; j < sizes[i] * sizes[i + 1]; j++) { w[i][j] = 1; } } for (int i = 0; i < num_of_layers; i++) { delta[i] = new double[sizes[i]]; for (int j = 0; j < sizes[i]; j++) { delta[i][j] = 1; } } for (int i = 0; i < num_of_layers; i++) { sigm_der[i] = new double[sizes[i]]; for (int j = 0; j < sizes[i]; j++) { sigm_der[i][j] = 0.5; } } // backpropagate requires only the delta[sizes[num_of_layers-1]] , so // we are not going to count the cudaMemcy's of the rest data, // simply because they can happen after cpu (host) updates the w's // and we got enough time from that point until we reach backpropagate function // suppose we do that way before we get close to backpropagation fucntion call // copy w in cuda for (int i = 0; i < num_of_layers - 1; i++) { cudaMemcpyAsync(&w_c[get_wSize_on_layer(i, sizes)], w[i], sizeof(double) * sizes[i] * sizes[i + 1], cudaMemcpyHostToDevice, default_stream); } // copy sigm_der in cuda for (int i = 0; i < num_of_layers; i++) { cudaMemcpyAsync(&sigm_der_c[get_dSize_on_layer(i, sizes)], sigm_der[i], sizeof(double) * sizes[i], cudaMemcpyHostToDevice, default_stream); } // copies done , wait for steam 0 (default) to compute all copies (as we said, we do not count them) cudaStreamSynchronize(default_stream); // now we may procced to 
backpropagation algorithm int multiplier = 0; gettimeofday(&t1, 0); // step 1 : copy the delta of the last layer into gpu // cpu commands : delta[numOfLayers - 1] = d_L; cudaMemcpyAsync(&delta_c[get_dSize_on_layer(num_of_layers - 1, sizes)], delta[num_of_layers - 1], sizeof(double) * sizes[num_of_layers - 1], cudaMemcpyHostToDevice, default_stream); // step 2 int bl = 0; for (int i = num_of_layers - 2; i >= 0; i--) { // w_d = matrix_vector_mull(sizeOfLayers[i + 1], sizeOfLayers[i + 2], w[i], delta[i + 1]); if(i>0){ multiplier = get_threads_per_cols(sizes[i]);// multiplier = get_threads_per_cols(cols); bl = sizes[i]; }else{ multiplier = get_threads_per_cols(sizes[i+1]); bl = sizes[i+1]; } array_mult<<<bl, threads_per_block, sizeof(double) * (3 * threads_per_block * multiplier), default_stream>>>(&w_c[get_wSize_on_layer(i, sizes)], &delta_c[get_dSize_on_layer(i + 1, sizes)], &delta_c[get_dSize_on_layer(i, sizes)], sizes[i + 1], 1, multiplier, &sigm_der_c[get_dSize_on_layer(i, sizes)]); // delta[i] = hadamard_product(sizeOfLayers[i + 1], w_d, sigm_derivative[i]); cudaStreamSynchronize(default_stream); cudaMemcpyAsync(delta[i], &delta_c[get_dSize_on_layer(i, sizes)], sizeof(double) * sizes[i], cudaMemcpyDeviceToHost, default_stream); } // wait until the last copy is completed cudaStreamSynchronize(default_stream); // done gettimeofday(&t2, 0); time = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; cout << "Parallel time is " << time << " millisec \n"; time_c = time; // retrieve data back to cpu memory for debbugin reasons cout<< "cuda results : \n"; for (int i = 0; i < 1; i++) { for (int j = 0; j < 3; j++) { cout<< delta[i][j] << " "; } } cout<< "\n"; // now the serial code gettimeofday(&t1, 0); backpropagate(delta, sigm_der,w,sizes,num_of_layers); gettimeofday(&t2, 0); time = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000.0; cout << "serial time is " << time << " millisec \n"; time_h = time; cout<< "cpu results : \n"; 
for (int i = 0; i < 1; i++) { for (int j = 0; j < 3; j++) { cout<< delta[i][j] << " "; } } cout<< "\n"; cout << "accelaration is " << (time_h-time_c)*100 << " % \n"; cout << "SUCCESS epitelous"; return 0; } void backpropagate(double** delta, double** sigm_derivative, double** w, int* sizeOfLayers, int numOfLayers) { double* w_d; for (int i = numOfLayers - 2; i >= 0; i--) { w_d = matrix_vector_mull(sizeOfLayers[i], sizeOfLayers[i + 1], w[i],delta[i + 1]); delta[i] = hadamard_product(sizeOfLayers[i], w_d, sigm_derivative[i]); delete[] w_d; } } double* hadamard_product(int size,double* a, double* b) { // returns the datamard product for vectors a and b // (return a.*b in matlab) // size = length of arrays a and b double* result = new double[size]; for (int i = 0; i < size; i++) { result[i] = a[i] * b[i]; } return result; } double* matrix_vector_mull(int cols, int rows, double* matrix, double* vector) { // TESTED // returns "cols x 1" vector double* temp = NULL ; double* res = new double[cols]; for(int j=0; j<cols; j++){ temp = new double[rows] ; for(int i=0; i<rows; i++){ temp[i] = matrix[i*cols+j]; } temp = hadamard_product(rows,temp,vector); res[j] = 0; for(int i=0; i<rows; i++){ res[j] += temp[i]; } delete[] temp; } return res; } double getRandom(int min, int max) { return (((max - min) * ((double) rand() / (double) RAND_MAX) + min) * 100) / 100; } int get_threads_per_cols(int cols) { if (cols < threads_per_block) { return 1; } int res = floor(cols / (double) threads_per_block); if (cols / (double) threads_per_block - floor(cols / (double) threads_per_block)) { res++; } return res; } int get_wSize_on_layer(int l, int* sizes) { int res = 0; for (int i = 0; i < l; i++) { res += sizes[i] * sizes[i + 1]; } return res; } int get_dSize_on_layer(int l, int* sizes) { int res = 0; for (int i = 0; i < l; i++) { res += sizes[i]; } return res; }
2f4ad2794762682ae1aa961ba95499576e67be31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Program to compute swaption portfolio using NVIDIA CUDA */ #include <stdio.h> #include "cutil_subset.h" // parameters for nVidia device execution #define BLOCK_SIZE 64 #define GRID_SIZE 64 // parameters for LIBOR calculation #define NN 80 #define NMAT 40 #define L2_SIZE 3280 //NN*(NMAT+1) #define NOPT 15 #define NPATH 4096 // constant data for swaption portfolio: stored in device memory, // initialised by host and read by device threads __constant__ int N, Nmat, Nopt, maturities[NOPT]; __constant__ float delta, swaprates[NOPT], lambda[NN]; /* Monte Carlo LIBOR path calculation */ __device__ void path_calc(float *L, float *z) { int i, n; float sqez, lam, con1, v, vrat; for(n=0; n<Nmat; n++) { sqez = sqrtf(delta)*z[n]; v = 0.0; for (i=n+1; i<N; i++) { lam = lambda[i-n-1]; con1 = delta*lam; v += __fdividef(con1*L[i],1.0+delta*L[i]); vrat = __expf(con1*v + lam*(sqez-0.5*con1)); L[i] = L[i]*vrat; } } } /* forward path calculation storing data for subsequent reverse path calculation */ __device__ void path_calc_b1(float *L, float *z, float *L2) { int i, n; float sqez, lam, con1, v, vrat; for (i=0; i<N; i++) L2[i] = L[i]; for(n=0; n<Nmat; n++) { sqez = sqrt(delta)*z[n]; v = 0.0; for (i=n+1; i<N; i++) { lam = lambda[i-n-1]; con1 = delta*lam; v += __fdividef(con1*L[i],1.0+delta*L[i]); vrat = __expf(con1*v + lam*(sqez-0.5*con1)); L[i] = L[i]*vrat; // store these values for reverse path // L2[i+(n+1)*N] = L[i]; } } } /* reverse path calculation of deltas using stored data */ __device__ void path_calc_b2(float *L_b, float *z, float *L2) { int i, n; float faci, v1; for (n=Nmat-1; n>=0; n--) { v1 = 0.0; for (i=N-1; i>n; i--) { v1 += lambda[i-n-1]*L2[i+(n+1)*N]*L_b[i]; faci = __fdividef(delta,1.0+delta*L2[i+n*N]); L_b[i] = L_b[i]*__fdividef(L2[i+(n+1)*N],L2[i+n*N]) + v1*lambda[i-n-1]*faci*faci; } } } /* calculate the portfolio value v, and its sensitivity to L */ /* hand-coded reverse mode 
sensitivity */ __device__ float portfolio_b(float *L, float *L_b) { int m, n; float b, s, swapval,v; float B[NMAT], S[NMAT], B_b[NMAT], S_b[NMAT]; b = 1.0; s = 0.0; for (m=0; m<N-Nmat; m++) { n = m + Nmat; b = __fdividef(b,1.0+delta*L[n]); s = s + delta*b; B[m] = b; S[m] = s; } v = 0.0; for (m=0; m<N-Nmat; m++) { B_b[m] = 0; S_b[m] = 0; } for (n=0; n<Nopt; n++){ m = maturities[n] - 1; swapval = B[m] + swaprates[n]*S[m] - 1.0; if (swapval<0) { v += -100*swapval; S_b[m] += -100*swaprates[n]; B_b[m] += -100; } } for (m=N-Nmat-1; m>=0; m--) { n = m + Nmat; B_b[m] += delta*S_b[m]; L_b[n] = -B_b[m]*B[m]*__fdividef(delta,1.0+delta*L[n]); if (m>0) { S_b[m-1] += S_b[m]; B_b[m-1] += __fdividef(B_b[m],1.+delta*L[n]); } } // apply discount // b = 1.0; for (n=0; n<Nmat; n++) b = b/(1.0+delta*L[n]); v = b*v; for (n=0; n<Nmat; n++){ L_b[n] = -v*delta/(1.0+delta*L[n]); } for (n=Nmat; n<N; n++){ L_b[n] = b*L_b[n]; } return v; } /* calculate the portfolio value v */ __device__ float portfolio(float *L) { int n, m, i; float v, b, s, swapval, B[40], S[40]; b = 1.0; s = 0.0; for(n=Nmat; n<N; n++) { b = b/(1.0+delta*L[n]); s = s + delta*b; B[n-Nmat] = b; S[n-Nmat] = s; } v = 0.0; for(i=0; i<Nopt; i++){ m = maturities[i] -1; swapval = B[m] + swaprates[i]*S[m] - 1.0; if(swapval<0) v += -100.0*swapval; } // apply discount // b = 1.0; for (n=0; n<Nmat; n++) b = b/(1.0+delta*L[n]); v = b*v; return v; } __global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int threadN = blockDim.x * gridDim.x; int i,path; float L[NN], L2[L2_SIZE], z[NN]; float *L_b = L; /* Monte Carlo LIBOR path calculation*/ for(path = tid; path < NPATH; path += threadN){ // initialise the data for current thread for (i=0; i<N; i++) { // for real application, z should be randomly generated z[i] = 0.3; L[i] = 0.05; } path_calc_b1(L, z, L2); d_v[path] = portfolio_b(L,L_b); path_calc_b2(L_b, z, L2); d_Lb[path] = L_b[NN-1]; } } __global__ void 
Pathcalc_Portfolio_KernelGPU2(float *d_v) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int threadN = blockDim.x * gridDim.x; int i, path; float L[NN], z[NN]; /* Monte Carlo LIBOR path calculation*/ for(path = tid; path < NPATH; path += threadN){ // initialise the data for current thread for (i=0; i<N; i++) { // for real application, z should be randomly generated z[i] = 0.3; L[i] = 0.05; } path_calc(L, z); d_v[path] = portfolio(L); } } //////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ // 'h_' prefix - CPU (host) memory space float *h_v, *h_Lb, h_lambda[NN], h_delta=0.25; int h_N=NN, h_Nmat=NMAT, h_Nopt=NOPT, i; int h_maturities[] = {4,4,4,8,8,8,20,20,20,28,28,28,40,40,40}; float h_swaprates[] = {.045,.05,.055,.045,.05,.055,.045,.05, .055,.045,.05,.055,.045,.05,.055 }; double v, Lb; double gpuTime; // 'd_' prefix - GPU (device) memory space float *d_v,*d_Lb; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(hipSetDevice(dev)); for (i=0; i<NN; i++) h_lambda[i] = 0.2; // Copy all constants into constant memory hipMemcpyToSymbol(N, &h_N, sizeof(h_N)); hipMemcpyToSymbol(Nmat, &h_Nmat, sizeof(h_Nmat)); hipMemcpyToSymbol(Nopt, &h_Nopt, sizeof(h_Nopt)); hipMemcpyToSymbol(delta, &h_delta, sizeof(h_delta)); hipMemcpyToSymbol(maturities, &h_maturities, sizeof(h_maturities)); hipMemcpyToSymbol(swaprates, &h_swaprates, sizeof(h_swaprates)); hipMemcpyToSymbol(lambda, &h_lambda, 
sizeof(h_lambda)); // Allocate memory on host and device h_v = (float *)malloc(sizeof(float)*NPATH); CUDA_SAFE_CALL( hipMalloc((void **)&d_v, sizeof(float)*NPATH) ); h_Lb = (float *)malloc(sizeof(float)*NPATH); CUDA_SAFE_CALL( hipMalloc((void **)&d_Lb, sizeof(float)*NPATH) ); // Execute GPU kernel -- no Greeks CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Set up the execution configuration dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(GRID_SIZE); // Launch the device computation threads hipLaunchKernelGGL(( Pathcalc_Portfolio_KernelGPU2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_v); //CUT_CHECK_ERROR("Pathcalc_Portfolio_kernelGPU2() execution failed\n"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Read back GPU results and compute average CUDA_SAFE_CALL( hipMemcpy(h_v, d_v, sizeof(float)*NPATH, hipMemcpyDeviceToHost) ); v = 0.0; for (i=0; i<NPATH; i++) v += h_v[i]; v = v / NPATH; printf("v = %15.8f\n", v); printf("Time(No Greeks) : %f msec\n", gpuTime); // Execute GPU kernel -- Greeks CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Launch the device computation threads hipLaunchKernelGGL(( Pathcalc_Portfolio_KernelGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, d_v,d_Lb); //CUT_CHECK_ERROR("Pathcalc_Portfolio_kernelGPU() execution failed\n"); CUDA_SAFE_CALL( hipDeviceSynchronize() ); // Read back GPU results and compute average CUDA_SAFE_CALL( hipMemcpy(h_v, d_v, sizeof(float)*NPATH, hipMemcpyDeviceToHost) ); CUDA_SAFE_CALL( hipMemcpy(h_Lb, d_Lb, sizeof(float)*NPATH, hipMemcpyDeviceToHost) ); v = 0.0; for (i=0; i<NPATH; i++) v += h_v[i]; v = v / NPATH; Lb = 0.0; for (i=0; i<NPATH; i++) Lb += h_Lb[i]; Lb = Lb / NPATH; printf("v = %15.8f\n", v); printf("Lb = %15.8f\n", Lb); printf("Time (Greeks) : %f msec\n", gpuTime); // Release GPU memory CUDA_SAFE_CALL( hipFree(d_v)); CUDA_SAFE_CALL( hipFree(d_Lb)); // Release CPU memory free(h_v); free(h_Lb); //CUT_EXIT(argc, argv); }
2f4ad2794762682ae1aa961ba95499576e67be31.cu
/* Program to compute swaption portfolio using NVIDIA CUDA */ #include <stdio.h> #include "cutil_subset.h" // parameters for nVidia device execution #define BLOCK_SIZE 64 #define GRID_SIZE 64 // parameters for LIBOR calculation #define NN 80 #define NMAT 40 #define L2_SIZE 3280 //NN*(NMAT+1) #define NOPT 15 #define NPATH 4096 // constant data for swaption portfolio: stored in device memory, // initialised by host and read by device threads __constant__ int N, Nmat, Nopt, maturities[NOPT]; __constant__ float delta, swaprates[NOPT], lambda[NN]; /* Monte Carlo LIBOR path calculation */ __device__ void path_calc(float *L, float *z) { int i, n; float sqez, lam, con1, v, vrat; for(n=0; n<Nmat; n++) { sqez = sqrtf(delta)*z[n]; v = 0.0; for (i=n+1; i<N; i++) { lam = lambda[i-n-1]; con1 = delta*lam; v += __fdividef(con1*L[i],1.0+delta*L[i]); vrat = __expf(con1*v + lam*(sqez-0.5*con1)); L[i] = L[i]*vrat; } } } /* forward path calculation storing data for subsequent reverse path calculation */ __device__ void path_calc_b1(float *L, float *z, float *L2) { int i, n; float sqez, lam, con1, v, vrat; for (i=0; i<N; i++) L2[i] = L[i]; for(n=0; n<Nmat; n++) { sqez = sqrt(delta)*z[n]; v = 0.0; for (i=n+1; i<N; i++) { lam = lambda[i-n-1]; con1 = delta*lam; v += __fdividef(con1*L[i],1.0+delta*L[i]); vrat = __expf(con1*v + lam*(sqez-0.5*con1)); L[i] = L[i]*vrat; // store these values for reverse path // L2[i+(n+1)*N] = L[i]; } } } /* reverse path calculation of deltas using stored data */ __device__ void path_calc_b2(float *L_b, float *z, float *L2) { int i, n; float faci, v1; for (n=Nmat-1; n>=0; n--) { v1 = 0.0; for (i=N-1; i>n; i--) { v1 += lambda[i-n-1]*L2[i+(n+1)*N]*L_b[i]; faci = __fdividef(delta,1.0+delta*L2[i+n*N]); L_b[i] = L_b[i]*__fdividef(L2[i+(n+1)*N],L2[i+n*N]) + v1*lambda[i-n-1]*faci*faci; } } } /* calculate the portfolio value v, and its sensitivity to L */ /* hand-coded reverse mode sensitivity */ __device__ float portfolio_b(float *L, float *L_b) { int m, n; float b, 
s, swapval,v; float B[NMAT], S[NMAT], B_b[NMAT], S_b[NMAT]; b = 1.0; s = 0.0; for (m=0; m<N-Nmat; m++) { n = m + Nmat; b = __fdividef(b,1.0+delta*L[n]); s = s + delta*b; B[m] = b; S[m] = s; } v = 0.0; for (m=0; m<N-Nmat; m++) { B_b[m] = 0; S_b[m] = 0; } for (n=0; n<Nopt; n++){ m = maturities[n] - 1; swapval = B[m] + swaprates[n]*S[m] - 1.0; if (swapval<0) { v += -100*swapval; S_b[m] += -100*swaprates[n]; B_b[m] += -100; } } for (m=N-Nmat-1; m>=0; m--) { n = m + Nmat; B_b[m] += delta*S_b[m]; L_b[n] = -B_b[m]*B[m]*__fdividef(delta,1.0+delta*L[n]); if (m>0) { S_b[m-1] += S_b[m]; B_b[m-1] += __fdividef(B_b[m],1.+delta*L[n]); } } // apply discount // b = 1.0; for (n=0; n<Nmat; n++) b = b/(1.0+delta*L[n]); v = b*v; for (n=0; n<Nmat; n++){ L_b[n] = -v*delta/(1.0+delta*L[n]); } for (n=Nmat; n<N; n++){ L_b[n] = b*L_b[n]; } return v; } /* calculate the portfolio value v */ __device__ float portfolio(float *L) { int n, m, i; float v, b, s, swapval, B[40], S[40]; b = 1.0; s = 0.0; for(n=Nmat; n<N; n++) { b = b/(1.0+delta*L[n]); s = s + delta*b; B[n-Nmat] = b; S[n-Nmat] = s; } v = 0.0; for(i=0; i<Nopt; i++){ m = maturities[i] -1; swapval = B[m] + swaprates[i]*S[m] - 1.0; if(swapval<0) v += -100.0*swapval; } // apply discount // b = 1.0; for (n=0; n<Nmat; n++) b = b/(1.0+delta*L[n]); v = b*v; return v; } __global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int threadN = blockDim.x * gridDim.x; int i,path; float L[NN], L2[L2_SIZE], z[NN]; float *L_b = L; /* Monte Carlo LIBOR path calculation*/ for(path = tid; path < NPATH; path += threadN){ // initialise the data for current thread for (i=0; i<N; i++) { // for real application, z should be randomly generated z[i] = 0.3; L[i] = 0.05; } path_calc_b1(L, z, L2); d_v[path] = portfolio_b(L,L_b); path_calc_b2(L_b, z, L2); d_Lb[path] = L_b[NN-1]; } } __global__ void Pathcalc_Portfolio_KernelGPU2(float *d_v) { const int tid = blockDim.x * blockIdx.x + 
threadIdx.x; const int threadN = blockDim.x * gridDim.x; int i, path; float L[NN], z[NN]; /* Monte Carlo LIBOR path calculation*/ for(path = tid; path < NPATH; path += threadN){ // initialise the data for current thread for (i=0; i<N; i++) { // for real application, z should be randomly generated z[i] = 0.3; L[i] = 0.05; } path_calc(L, z); d_v[path] = portfolio(L); } } //////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////// int main(int argc, char **argv){ // 'h_' prefix - CPU (host) memory space float *h_v, *h_Lb, h_lambda[NN], h_delta=0.25; int h_N=NN, h_Nmat=NMAT, h_Nopt=NOPT, i; int h_maturities[] = {4,4,4,8,8,8,20,20,20,28,28,28,40,40,40}; float h_swaprates[] = {.045,.05,.055,.045,.05,.055,.045,.05, .055,.045,.05,.055,.045,.05,.055 }; double v, Lb; double gpuTime; // 'd_' prefix - GPU (device) memory space float *d_v,*d_Lb; // initialise card and timer int deviceCount; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(cudaSetDevice(dev)); for (i=0; i<NN; i++) h_lambda[i] = 0.2; // Copy all constants into constant memory cudaMemcpyToSymbol(N, &h_N, sizeof(h_N)); cudaMemcpyToSymbol(Nmat, &h_Nmat, sizeof(h_Nmat)); cudaMemcpyToSymbol(Nopt, &h_Nopt, sizeof(h_Nopt)); cudaMemcpyToSymbol(delta, &h_delta, sizeof(h_delta)); cudaMemcpyToSymbol(maturities, &h_maturities, sizeof(h_maturities)); cudaMemcpyToSymbol(swaprates, &h_swaprates, sizeof(h_swaprates)); cudaMemcpyToSymbol(lambda, &h_lambda, sizeof(h_lambda)); // Allocate memory on host and device h_v = (float 
*)malloc(sizeof(float)*NPATH); CUDA_SAFE_CALL( cudaMalloc((void **)&d_v, sizeof(float)*NPATH) ); h_Lb = (float *)malloc(sizeof(float)*NPATH); CUDA_SAFE_CALL( cudaMalloc((void **)&d_Lb, sizeof(float)*NPATH) ); // Execute GPU kernel -- no Greeks CUDA_SAFE_CALL( cudaThreadSynchronize() ); // Set up the execution configuration dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(GRID_SIZE); // Launch the device computation threads Pathcalc_Portfolio_KernelGPU2<<<dimGrid, dimBlock>>>(d_v); //CUT_CHECK_ERROR("Pathcalc_Portfolio_kernelGPU2() execution failed\n"); CUDA_SAFE_CALL( cudaThreadSynchronize() ); // Read back GPU results and compute average CUDA_SAFE_CALL( cudaMemcpy(h_v, d_v, sizeof(float)*NPATH, cudaMemcpyDeviceToHost) ); v = 0.0; for (i=0; i<NPATH; i++) v += h_v[i]; v = v / NPATH; printf("v = %15.8f\n", v); printf("Time(No Greeks) : %f msec\n", gpuTime); // Execute GPU kernel -- Greeks CUDA_SAFE_CALL( cudaThreadSynchronize() ); // Launch the device computation threads Pathcalc_Portfolio_KernelGPU<<<dimGrid, dimBlock>>>(d_v,d_Lb); //CUT_CHECK_ERROR("Pathcalc_Portfolio_kernelGPU() execution failed\n"); CUDA_SAFE_CALL( cudaThreadSynchronize() ); // Read back GPU results and compute average CUDA_SAFE_CALL( cudaMemcpy(h_v, d_v, sizeof(float)*NPATH, cudaMemcpyDeviceToHost) ); CUDA_SAFE_CALL( cudaMemcpy(h_Lb, d_Lb, sizeof(float)*NPATH, cudaMemcpyDeviceToHost) ); v = 0.0; for (i=0; i<NPATH; i++) v += h_v[i]; v = v / NPATH; Lb = 0.0; for (i=0; i<NPATH; i++) Lb += h_Lb[i]; Lb = Lb / NPATH; printf("v = %15.8f\n", v); printf("Lb = %15.8f\n", Lb); printf("Time (Greeks) : %f msec\n", gpuTime); // Release GPU memory CUDA_SAFE_CALL( cudaFree(d_v)); CUDA_SAFE_CALL( cudaFree(d_Lb)); // Release CPU memory free(h_v); free(h_Lb); //CUT_EXIT(argc, argv); }
5014aefe6239b9302ed81b7f79e0d2063cb4bd19.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "hip/device_functions.h" #include "produce_pc_kernel.h" #include <stdio.h> #include <iostream> #include <fstream> #include <cstdlib> #ifndef __HIPCC__ #define __HIPCC__ #endif #include "device_launch_parameters.h" #include <hip/hip_runtime_api.h> #include <string.h> // memcpy #include <cstdlib> #include <signal.h> #include <time.h> #include <math.h> #include <thrust/device_ptr.h> #include <algorithm> #include <vector> #include <iostream> #include <thrust/sort.h> #include <thrust/device_vector.h> //#include "Protonect.h" //#include <af/cuda.h> //#include <arrayfire.h> #include <opencv2/opencv.hpp> #include <LaterMethods.h> #define PI 3.1415 using namespace std; texture<int, 1, hipReadModeElementType> texref0, texref2; texture<float, 1, hipReadModeElementType> texref1; const size_t imageSize1 = 4 * 424 * 512; dim3 threadsPerBlock(32, 32); dim3 threadsPerBlock1(1, 400); dim3 Grid1((400+threadsPerBlock1.x-1)/threadsPerBlock1.x, (400+threadsPerBlock1.y-1)/threadsPerBlock1.y); dim3 Grid_((400+threadsPerBlock.x-1)/threadsPerBlock.x, (240+threadsPerBlock.y-1)/threadsPerBlock.y); dim3 Grid((512+threadsPerBlock.x-1)/threadsPerBlock.x, (424+threadsPerBlock.y-1)/threadsPerBlock.y); struct is_zero { __host__ __device__ bool operator()(const int x) { return (x==0); } }; __global__ void peng_zhang(unsigned char * pj,unsigned char *temp_pj,size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int j = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int idex = (i) + (j) * width;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * width, (i) + (j-1) * width,(i+1) + (j-1) * width,(i-1) + (j) * width, (i+1) + (j) * width,(i-1) + (j+1) * width,(i) + (j+1) * width,(i+1) + (j+1) * width}; if ( j < height ) { if ( i < width) { if (temp_pj[idex]==0)// { if (j==0)// { if(i==0) { if 
((temp_pj[neighbors[4]]>0)||(temp_pj[neighbors[6]]>0)||(temp_pj[neighbors[7]]>0))// { pj[idex]=1; } } if(i==width-1) { if ((temp_pj[neighbors[3]]>0)||(temp_pj[neighbors[5]]>0)||(temp_pj[neighbors[6]]>0))// { pj[idex]=1; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=3;k<8;k++) { if (temp_pj[neighbors[k]]>0) { num_++; if (num_>2) { pj[idex]=1; break; } } } } } if (j==(height-1))// { if(i==0) { if ((temp_pj[neighbors[1]]>0)||(temp_pj[neighbors[2]]>0)||(temp_pj[neighbors[4]]>0))// { pj[idex]=1; } } if(i==width-1) { if ((temp_pj[neighbors[0]]>0)||(temp_pj[neighbors[1]]>0)||(temp_pj[neighbors[3]]>0))// { pj[idex]=1; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=0;k<5;k++) { if (temp_pj[neighbors[k]]>0) { num_++; if (num_>2) { pj[idex]=1; break; } } } } } if ((j>0)&&(j<(height-1)))// { if(i==0) { if ((temp_pj[neighbors[1]]>0)||(temp_pj[neighbors[2]]>0)||(temp_pj[neighbors[4]]>0)||(temp_pj[neighbors[6]]>0)||(temp_pj[neighbors[7]]>0))// { pj[idex]=1; } } if(i==width-1) { if ((temp_pj[neighbors[0]]>0)||(temp_pj[neighbors[1]]>0)||(temp_pj[neighbors[3]]>0)||(temp_pj[neighbors[5]]>0)||(temp_pj[neighbors[6]]>0))// { pj[idex]=1; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=0;k<8;k++) { if (temp_pj[neighbors[k]]>0) { num_++; if (num_>2) { pj[idex]=1; break; } } } } } } }//for i=512 }//for j=424 } __global__ void fu_shi(unsigned char * pj, unsigned char * temp_pj, size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int j = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int idex = (i) + (j) * width;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * width, (i) + (j-1) * width,(i+1) + (j-1) * width,(i-1) + (j) * width, (i+1) + (j) * width,(i-1) + (j+1) * width,(i) + (j+1) * width,(i+1) + (j+1) * width}; if ( j < height ) { if ( i < width) { if (temp_pj[idex]>0)// { if (j==0)// { if(i==0) { if ((temp_pj[neighbors[4]]==0)||(temp_pj[neighbors[6]]==0)||(temp_pj[neighbors[7]]==0))// { pj[idex]=0; } } if(i==width-1) { if 
((temp_pj[neighbors[3]]==0)||(temp_pj[neighbors[5]]==0)||(temp_pj[neighbors[6]]==0))// { pj[idex]=0; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=3;k<8;k++) { if (temp_pj[neighbors[k]]==0) { num_++; if (num_>2) { pj[idex]=0; break; } } } } } if (j==(height-1))// { if(i==0) { if ((temp_pj[neighbors[1]]==0)||(temp_pj[neighbors[2]]==0)||(temp_pj[neighbors[4]]==0))// { pj[idex]=0; } } if(i==width-1) { if ((temp_pj[neighbors[0]]==0)||(temp_pj[neighbors[1]]==0)||(temp_pj[neighbors[3]]==0))// { pj[idex]=0; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=0;k<5;k++) { if (temp_pj[neighbors[k]]==0) { num_++; if (num_>2) { pj[idex]=0; break; } } } } } if ((j>0)&&(j<(height-1)))// { if(i==0) { if ((temp_pj[neighbors[1]]==0)||(temp_pj[neighbors[2]]==0)||(temp_pj[neighbors[4]]==0)||(temp_pj[neighbors[6]]==0)||(temp_pj[neighbors[7]]==0))// { pj[idex]=1; } } if(i==width-1) { if ((temp_pj[neighbors[0]]==0)||(temp_pj[neighbors[1]]==0)||(temp_pj[neighbors[3]]==0)||(temp_pj[neighbors[5]]==0)||(temp_pj[neighbors[6]]==0))// { pj[idex]=0; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=0;k<8;k++) { if (temp_pj[neighbors[k]]==0) { num_++; if (num_>2) { pj[idex]=0; break; } } } } } } }//for i=512 }//for j=424 // } __global__ void kernel3(const float* d_dis_depth, const unsigned int* d_raw_rgb, const float* cc, float *_cloud,unsigned char *d_table, unsigned char *seg_result,int *n)//, const float* cc, float *_cloud { int c = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int r = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int index = (c) + (r) * 512;//0--512*424-1=217087 int depth_to_c_off; const float &cx = cc[0]; const float &cy = cc[1]; const float &fx = cc[2]; const float &fy = cc[3]; if ( r < 424 ) { if ( c < 512) { const int &id = tex1Dfetch(texref0,index); const float z = id <0 ? 
0.02f : d_dis_depth[id]; const int c_off = (z>0)*((tex1Dfetch(texref1,index) + __fdividef(52.0f,z)) * 1081.372070f + 960.0f + tex1Dfetch(texref2,index) * 1920); depth_to_c_off = ((z>0)&&(id>=0)&&(c_off>=0)&&(c_off<1920 * 1080)) * (c_off+1) + (-1); // const float &depth_v = d_dis_depth[id] * (id>=0); // _cloud[index*8+0] = (c-cx) * (fx) * depth_v; // _cloud[index*8+1] = (r-cy) * (fy) * depth_v; // _cloud[index*8+2] = depth_v; const float &depth_v = (id>=0) * d_dis_depth[id]; _cloud[index*8+0] = ((depth_v>12000.f)||(depth_v<=1.f))? 100000.0f : ((c-cx) * (fx) * depth_v); _cloud[index*8+1] = ((depth_v>12000.f)||(depth_v<=1.f))? 100000.0f : ((r-cy) * (fy) * depth_v); _cloud[index*8+2] = ((depth_v>12000.f)||(depth_v<=1.f))? 100000.0f : depth_v; const int &rgb_val = depth_to_c_off < 0 ? 0 : d_raw_rgb[depth_to_c_off]; u_char* rgba = (u_char*) ( _cloud + index*8+4 ); const u_char* bgra = (const u_char*) &rgb_val; rgba[0] = bgra[2]; rgba[1] = bgra[1]; rgba[2] = bgra[0]; // seg_result[index]=d_table[rgba[0]/4*64*64 + rgba[1]/4*64 + rgba[2]/4]; if(seg_result[index]==0)// { seg_result[index]=1; atomicAdd(&(n[0]),1); } else seg_result[index]=0; } } __syncthreads(); } void registration_kernel(const float* d_dis_depth, const unsigned int* d_raw_rgb, const int* d_map_dist, const float* d_map_x, const int* d_map_yi, const float* cc, float *_cloud, unsigned char *d_table, unsigned char *seg_result, int *num_yellow)//, const float* cc, float *_cloud { hipBindTexture(0,texref0,d_map_dist,imageSize1); hipBindTexture(0,texref1,d_map_x,imageSize1); hipBindTexture(0,texref2,d_map_yi,imageSize1); int *d_n; hipMalloc((void**)&d_n,sizeof(int)); hipMemset(d_n,0,sizeof(int)); hipLaunchKernelGGL(( kernel3), dim3(Grid), dim3(threadsPerBlock) , 0, 0, d_dis_depth, d_raw_rgb, cc, _cloud, d_table, seg_result, d_n); hipUnbindTexture(texref0); hipUnbindTexture(texref1); hipUnbindTexture(texref2); hipMemcpy(num_yellow,d_n,sizeof(int),hipMemcpyDeviceToHost); hipFree(d_n); } //float badpt = 
std::numeric_limits<float>::quiet_NaN (); __global__ void cluster_center_kernel(unsigned char *d_seg_t,unsigned char * segment_table) { int i = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int j = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int idex = (i) + (j) * 512;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * 512, (i) + (j-1) * 512,(i+1) + (j-1) * 512,(i-1) + (j) * 512, (i+1) + (j) * 512,(i-1) + (j+1) * 512,(i) + (j+1) * 512,(i+1) + (j+1) * 512}; if ( j < 424 ) { if ( i < 512) { // if (segment_table[idex]==0)// { for (int k=0; k<8; k++) { if ((neighbors[k]>0) && (neighbors[k]<512*424) && (segment_table[neighbors[k]]==1))// { d_seg_t[idex]=1; break; } } } }//for i=512 }//for j=424 } __global__ void cluster_center_kernel_ex(unsigned char * d_seg_t,unsigned char * segment_table) { int i = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int j = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int idex = (i) + (j) * 512;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * 512, (i) + (j-1) * 512,(i+1) + (j-1) * 512,(i-1) + (j) * 512, (i+1) + (j) * 512,(i-1) + (j+1) * 512,(i) + (j+1) * 512,(i+1) + (j+1) * 512}; if ( j < 424 ) { if ( i < 512) { // if (segment_table[idex]==1)// { for (int k=0; k<8; k++) { if ((neighbors[k]>0) && (neighbors[k]<512*424) && (segment_table[neighbors[k]]==0))// { d_seg_t[idex]=0; break; } } } }//for i=512 }//for j=424 } __global__ void label_initialize(unsigned char * segment_table, int *d_label, size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 int idex = (i) + (j) * width;//0--512*424-1=217087 if (idex<width*height) { if (segment_table[idex]>0 && i>0) { d_label[idex]=i; // atomicAdd(&(ch1[0]),1); } else d_label[idex]=0; __syncthreads(); } } __global__ void eight_DLS(unsigned char * segment_table, int *d_label, int *flag)//flag0 { int i = threadIdx.x + 
blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 512;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * 512, (i) + (j-1) * 512,(i+1) + (j-1) * 512,(i-1) + (j) * 512, (i+1) + (j) * 512,(i-1) + (j+1) * 512,(i) + (j+1) * 512,(i+1) + (j+1) * 512}; int no_neigbor=0; if (i<512) { if (j<424) { if (segment_table[idex]>0)//object pixel { // atomicAdd(&(ch[0]),1); int mini=d_label[idex]; for (int n=0;n<8;n++)//8 { while ((neighbors[n]>0)&&(neighbors[n]<512*424)&&(segment_table[neighbors[n]]!=0))//n0 { //pixel if(d_label[neighbors[n]]<mini) { mini=d_label[neighbors[n]]; } switch (n) { case 0:// neighbors[n]=neighbors[n]-512-1; break; case 1:// neighbors[n]=neighbors[n]-512; break; case 2:// neighbors[n]=neighbors[n]-512+1; break; case 3:// neighbors[n]=neighbors[n]-1; break; case 4:// neighbors[n]=neighbors[n]+1; break; case 5:// neighbors[n]=neighbors[n]+512-1; break; case 6:// neighbors[n]=neighbors[n]+512; break; case 7:// neighbors[n]=neighbors[n]+512+1; break; default: break; } } } if (mini<d_label[idex]) { d_label[idex]=mini; flag[0]=1; } } // __syncthreads(); } } } __global__ void eight_DLS_last(unsigned char * segment_table, int *d_label, int *array, int *flag, size_t nnn) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 512;//0--512*424-1=217087 int s_idex=threadIdx.x +blockDim.x*threadIdx.y; int neighbors[8] = {(i-1) + (j-1) * 512, (i) + (j-1) * 512,(i+1) + (j-1) * 512,(i-1) + (j) * 512, (i+1) + (j) * 512,(i-1) + (j+1) * 512,(i) + (j+1) * 512,(i+1) + (j+1) * 512}; // if (idex==0) // printf("aaaaaa %d \n",flag[0]); if (i<512) { if (j<424) { if ((segment_table[idex]>0) && (d_label[idex]>0))//object pixel { // if ((nnn>2)&&(d_label[d_label[idex]]==d_label[idex])) 
// { // } // else // { int mini=d_label[idex]; for (int n=0;n<8;n++)//8 { while ((neighbors[n]>0)&&(neighbors[n]<512*424)&&(segment_table[neighbors[n]]!=0))//n0 { //pixel if((d_label[neighbors[n]]<mini)&&(d_label[neighbors[n]]>0)) { mini=d_label[neighbors[n]]; } switch (n) { case 0:// neighbors[n]=neighbors[n]-512-1; break; case 1:// neighbors[n]=neighbors[n]-512; break; case 2:// neighbors[n]=neighbors[n]-512+1; break; case 3:// neighbors[n]=neighbors[n]-1; break; case 4:// neighbors[n]=neighbors[n]+1; break; case 5:// neighbors[n]=neighbors[n]+512-1; break; case 6:// neighbors[n]=neighbors[n]+512; break; case 7:// neighbors[n]=neighbors[n]+512+1; break; default: break; } } } if (mini<d_label[idex]) { d_label[idex]=mini; // atomicExch(&(flag[0]),1); } // }//if smalllest CCL }//if object pixel __syncthreads(); __shared__ bool lockx1; __threadfence(); if(s_idex==0) { unsigned int lockiii1=atomicAdd(&(array[5]),1); lockx1=(array[5]==224); } __syncthreads(); if(lockx1)// { } if (nnn==2) { if (d_label[idex]>0) { if ((d_label[idex]!=array[0])&&(d_label[idex]!=array[1])&&(d_label[idex]!=array[2])&&(d_label[idex]!=array[3])&&(d_label[idex]!=array[4])) { for (int k=0;k<5;k++) { if (array[k]==0) { atomicExch(&(array[k]),d_label[idex]); break; } } } } } }//j=424 }//i=512 }// __global__ void eight_DLS_last_obstacle(unsigned char * segment_table, int *d_label, size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * width;//0--512*424-1=217087 int s_idex=threadIdx.x +blockDim.x*threadIdx.y; int neighbors[8] = {(i-1) + (j-1) * width, (i) + (j-1) * width,(i+1) + (j-1) * width,(i-1) + (j) * width, (i+1) + (j) * width,(i-1) + (j+1) * width,(i) + (j+1) * width,(i+1) + (j+1) * width}; // if (idex==0) // printf("aaaaaa %d \n",flag[0]); if (i<width) { if (j<height) { if 
((segment_table[idex]>0)&&(d_label[idex]>0))//object pixel { // if ((nnn>2)&&(d_label[d_label[idex]]==d_label[idex])) // { // } // else // { int mini=d_label[idex]; for (int n=0;n<8;n++)//8 { while ((neighbors[n]>0)&&(neighbors[n]<width*height)&&(segment_table[neighbors[n]]!=0))//n0 { //pixel if((d_label[neighbors[n]]<mini)&&(d_label[neighbors[n]]>0)) { mini=d_label[neighbors[n]]; } switch (n) { case 0:// neighbors[n]=neighbors[n]-width-1; break; case 1:// neighbors[n]=neighbors[n]-width; break; case 2:// neighbors[n]=neighbors[n]-width+1; break; case 3:// neighbors[n]=neighbors[n]-1; break; case 4:// neighbors[n]=neighbors[n]+1; break; case 5:// neighbors[n]=neighbors[n]+width-1; break; case 6:// neighbors[n]=neighbors[n]+width; break; case 7:// neighbors[n]=neighbors[n]+width+1; break; default: break; } } } if (mini<d_label[idex]) { d_label[idex]=mini; // flag[0]=1; } // }//if smalllest CCL }//if object pixel }//j=424 }//i=512 }// __global__ void count(int *d_label, int numb, int *center, size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * width;//0--512*424-1=217087 if (idex<width*height) { // if (idex==0) // printf("nnnnnnnnnnnnnnnnnn1111 %d %d\n\n",n[0],numb); if (d_label[idex]==numb) { atomicAdd(&(center[0]),1); atomicAdd(&(center[1]),i); atomicAdd(&(center[2]),j); } __syncthreads(); } } double gettime() { double tseconds=0.0; struct timeval mytime; gettimeofday(&mytime,(struct timezone *)0); tseconds=(double)(mytime.tv_sec+mytime.tv_usec*1.0e-6); return tseconds; } int CCL(unsigned char *d_seg_table, int *d_label, float *ball_position) { double ti=gettime(); unsigned char *d_seg_table_temp; hipMalloc((void **)&d_seg_table_temp,512*424*sizeof(unsigned char)); for (int j=0;j<3;j++) {//corrosion hipMemcpy(d_seg_table_temp,d_seg_table,512*424*sizeof(unsigned 
char),hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( fu_shi), dim3(Grid), dim3(threadsPerBlock) , 0, 0, d_seg_table,d_seg_table_temp,512,424); } for (int i=0;i<4;i++) {//dilate hipMemcpy(d_seg_table_temp,d_seg_table,512*424*sizeof(unsigned char),hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( peng_zhang), dim3(Grid), dim3(threadsPerBlock) , 0, 0, d_seg_table,d_seg_table_temp,512,424); } hipDeviceSynchronize(); hipLaunchKernelGGL(( label_initialize), dim3(Grid), dim3(threadsPerBlock) , 0, 0, d_seg_table,d_label,512,424);// int *array; hipMalloc((void **)&array,6*sizeof(int)); hipMemset(array,0,6*sizeof(int)); int *h_array; hipHostMalloc((void **)&h_array,5*sizeof(int),hipHostMallocDefault); int *d_flag; hipMalloc((void **)&d_flag,1*sizeof(int)); int h_flag=1; for (size_t nnn=0;nnn<3;nnn++) { hipLaunchKernelGGL(( eight_DLS_last), dim3(Grid), dim3(threadsPerBlock) , 0, 0, d_seg_table,d_label,array,d_flag,nnn); } hipDeviceSynchronize(); hipMemcpy(h_array,array,5*sizeof(int),hipMemcpyDeviceToHost);//change=0, int *h_center; hipHostMalloc((void **)&h_center,3*sizeof(int),hipHostMallocDefault); int *center; hipMalloc((void **)&center,3*sizeof(int)); int nu=0; for (int h1=0;h1<5;h1++) { if (h_array[h1]>0) { hipMemset(center,0,3*sizeof(int)); hipLaunchKernelGGL(( count), dim3(Grid), dim3(threadsPerBlock) , 0, 0, d_label,h_array[h1],center,512,424); hipDeviceSynchronize(); hipMemcpy(h_center,center,3*sizeof(int),hipMemcpyDeviceToHost); if (h_center[0]<20)// { h_array[h1]=0; } else { hipMemcpy(h_center,center,2*sizeof(int),hipMemcpyDeviceToHost); ball_position[3*nu+0]=h_center[1]/(h_center[0]); ball_position[3*nu+1]=h_center[2]/(h_center[0]); ball_position[3*nu+2]=h_center[0]; nu=nu+1; } } } hipDeviceSynchronize(); hipFree(d_flag); hipHostFree(h_array); hipFree(center); hipHostFree(h_center); hipFree(array); hipFree(d_seg_table_temp); return (nu); } __global__ void projection_kernel(float *d_cloud, float *coeffi, int *histo_x, int *histo_y, int *histo_z,unsigned char *d_table) 
{ int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 512;//0--512*424-1=217087 __shared__ float coe[12]; if (threadIdx.x<12) coe[threadIdx.x]=coeffi[threadIdx.x]; if (idex<512*424) { // if ((d_cloud[idex*8+0]>-6000)&&(d_cloud[idex*8+0]<6000)&&(d_cloud[idex*8+1]>-4000)&&(d_cloud[idex*8+1]<2000)&&(d_cloud[idex*8+2]>500)&&(d_cloud[idex*8+2]<9000))// // { u_char* rgba = (u_char*) ( d_cloud + idex*8+4 ); // unsigned char re; re=d_table[rgba[0]/4*64*64 + rgba[1]/4*64 + rgba[2]/4]; if(re!=0)// { // float xj=d_cloud[idex*8+0]; // float yj=d_cloud[idex*8+1]; // float zj=d_cloud[idex*8+2];//kinect // double distance_to_plane = coeffi[0]*xj + (coeffi[1])*yj + (coeffi[2])*zj + coeffi[3]*1000; float xr= coe[0]*d_cloud[idex*8+0]+coe[1]*d_cloud[idex*8+1]+coe[2]*d_cloud[idex*8+2]+coe[3]*1000;// float yr= coe[4]*d_cloud[idex*8+0]+coe[5]*d_cloud[idex*8+1]+coe[6]*d_cloud[idex*8+2]+coe[7]*1000; float zr= coe[8]*d_cloud[idex*8+0]+coe[9]*d_cloud[idex*8+1]+coe[10]*d_cloud[idex*8+2]+coe[11]*1000; //XZ int x = (int)((yr+10000)/50); int z = (int)((xr)/50); int w=z*400+x; if ((zr>100)&&(zr<300))//&&(distance_to_plane<1) { atomicAdd(&(histo_z[w]),1); } if ((zr>240)&&(zr<860))//&&(distance_to_plane<1) { atomicAdd(&(histo_x[w]),1); // printf("%f %f ",zj_,xr); } if ((zr>1900)&&(zr<2500))//&&(distance_to_plane<1) { atomicAdd(&(histo_y[w]),1); } } // } } } //__device__ void kernel_up(int &loc, float *temp_pj) //{ //} //__device__ int *location; __global__ void object_label(unsigned char *pj, int *histo_x, int *histo_y, int *histo_z) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 int histx=histo_x[idex]; int histy=histo_y[idex]; int histz=histo_z[idex]; 
if (idex<400*240) { // int z_thresh= if (idex<16000)//0-2m { pj[idex]=(histz>90)&&(histx>10)&&(histy<1)? (histx+histz):0; } if ((idex>16000)&&(idex<24000))//2-3m { pj[idex]=(histz>60)&&(histx>2)&&(histy<1)? (histx+histz):0; } if ((idex<28000)&&(idex>24000))//3.0-3.5m { pj[idex]=(histz>30)&&(histx>2)&&(histy<1)? (histx+histz):0; } if ((idex<32000)&&(idex>28000))//3.5-4m { pj[idex]=(histz>20)&&(histx>1)&&(histy<1)? (histx+histz):0; } if ((idex<40000)&&(idex>32000))//4-5m { pj[idex]=(histz>5)&&(histx<50)&&(histy<1)? (histx+histz):0; } if ((idex<44000)&&(idex>40000))//5-5.5m { pj[idex]=(histz>3)&&(histx<30)&&(histy<1)? (histx+histz):0; } if ((idex<56000)&&(idex>44000))//5.5-7m { pj[idex]=(histz>4)&&(histx<20)&&(histy<1)? (histx+histz):0; } // if((idex>=56000)&&(idex<60000)) // { // pj[idex]=0; // } // if ((histo_z[idex]>2)&&(histo_x[idex]>0)&&(histo_y[idex]<2))//z // { // pj[idex]=histo_x[idex]+histo_z[idex]+histo_y[idex]; // } // else // { // pj[idex]=0; // } // pj[idex]=(histo_z[idex]>2)&&(histo_x[idex]>0)&&(histo_y[idex]<10)? 
(histo_x[idex]+histo_z[idex]+histo_y[idex]) : 0; } } __global__ void Gaussian1D_kernel(unsigned char *pj, double *kernel, size_t size, unsigned char *result) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 if (idex<400*240) { int kCenter = floor(size/2.0); int nn; //y for (int n = 0; n < size; n++) { nn = size - 1 - n; int posx = i + (n - kCenter); int posy = j; int w=posx*400+posy; if(posx >= 0 && posx < 400) { result[idex] += pj[w]*kernel[nn]; } } } } __global__ void Gaussian1D_kernel_(unsigned char *result, double *kernel, size_t size, unsigned char *pj, int *d_label) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 if (idex<400*240) { int kCenter = floor(size/2.0); int nn; if ( (j>9) && (j<230)) { for (int n = 0; n < size; n++) { nn = size - 1 - n; int posx = j; int posy = i + (n - kCenter); int w=posy*400+posx; if(posy >= 0 && posy < 240) { pj[idex] += result[w]*kernel[nn]; } } //x// if ((pj[idex]>0)&&(i>0)) { d_label[idex]=i; // atomicAdd(&(ch1[0]),1); } } } } __global__ void compute_array(int *label, int *array1) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 if (idex<400*240) { if (label[idex]>0) { if ((label[idex]!=array1[0])&&(label[idex]!=array1[1])&&(label[idex]!=array1[2])&&(label[idex]!=array1[3])&&(label[idex]!=array1[4])&&(label[idex]!=array1[5])&&(label[idex]!=array1[6])&&(label[idex]!=array1[7])&&(label[idex]!=array1[8])&&(label[idex]!=array1[9])) { for (int k=0;k<10;k++) { if 
(array1[k]==0) { atomicExch(&(array1[k]),label[idex]); break; } } } } } } __global__ void computer_center(int *d_label,int *index,int *t_index,int *amount,int *i_amount,int *j_amount) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 const int sidex = threadIdx.y; __shared__ int region_index[1]; __shared__ int pixel_amount[1]; __shared__ int x_amount[1]; __shared__ int y_amount[1]; __shared__ unsigned char s_label[192]; __shared__ int flag[1]; region_index[0]=0; pixel_amount[0]=0; x_amount[0]=0; y_amount[0]=0; flag[0]=0; if (sidex<192) { s_label[sidex]=d_label[idex]; __syncthreads(); if (s_label[sidex]>0) { region_index[0]=s_label[sidex]; atomicAdd(&(pixel_amount[0]),1); atomicAdd(&(x_amount[0]),i); atomicAdd(&(y_amount[0]),j); flag[0]=1; } } __syncthreads(); index[blockIdx.x]=flag[0];//0 1 0 1 1 0 0 0 1 1 1 t_index[blockIdx.x]=region_index[0]; amount[blockIdx.x]=pixel_amount[0]; i_amount[blockIdx.x]=x_amount[0]; j_amount[blockIdx.x]=y_amount[0]; __syncthreads(); } __global__ void computer_center1(int *output,int *index,int *t_index,int *amount,int *i_amount,int *j_amount,int *output_amount,int *output_x,int *output_y) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 const int sidex = threadIdx.y; int space=1; // if(blockIdx.x==0) { int tt=0; // printf("%d ",index[sidex]);//index for (int d=1;d<=400;d=d*2) { int temp=index[sidex]; int neighbor=0; if ((sidex-space>0)) neighbor=index[sidex-space]; __syncthreads(); if (sidex<space) continue; else tt=temp+neighbor;//indexneighbor space=space*2; __syncthreads(); index[sidex]=tt; } } __syncthreads(); // //index if (blockIdx.x==0) { // int 
current=sidex; // int later=current+1; //indexindex[399] if (sidex<399)//only use the even() threads, and the position in index is odd. { if (index[sidex+1]==(index[sidex]+1)) { // printf("even %d %d \n",index[sidex],index[sidex+1]); output[index[sidex]]=t_index[sidex+1]; output_amount[index[sidex]]=amount[sidex+1]; output_x[index[sidex]]=i_amount[sidex+1]; output_y[index[sidex]]=j_amount[sidex+1]; } } } __syncthreads(); output[99]=index[399];//index } int project2D(float *d_cloud, unsigned char *pj, float *aa,float *obstacle_position,unsigned char *d_table,int *histo_x,int *histo_y,int *histo_z,double *d_kernel,size_t kernel_size,unsigned char *result_, int *label, int *h_label) { int *d_index,*d_t_index,*d_amount,*dx_amount,*dy_amount,*d_output; int *output_amount,*output_x, *output_y; hipMalloc((void **)&d_index,400*sizeof(int)); hipMalloc((void **)&d_t_index,400*sizeof(int)); hipMalloc((void **)&d_amount,400*sizeof(int)); hipMalloc((void **)&dx_amount,400*sizeof(int)); hipMalloc((void **)&dy_amount,400*sizeof(int)); hipMalloc((void **)&d_output,100*sizeof(int)); hipMalloc((void **)&output_amount,100*sizeof(int)); hipMalloc((void **)&output_x,100*sizeof(int)); hipMalloc((void **)&output_y,100*sizeof(int)); hipMemset(histo_x,0,400*240*sizeof(int)); hipMemset(histo_y,0,400*240*sizeof(int)); hipMemset(histo_z,0,400*240*sizeof(int)); hipMemset(result_,0,400*240*sizeof(unsigned char)); hipMemset(label,0,400*240*sizeof(int)); hipMemset(h_label,0,400*240*sizeof(int)); hipMemset(d_amount,0,400*sizeof(int)); hipMemset(dx_amount,0,400*sizeof(int)); hipMemset(dy_amount,0,400*sizeof(int)); hipMemset(d_index,0,400*sizeof(int)); hipMemset(d_t_index,0,400*sizeof(int)); hipMemset(d_output,0,100*sizeof(int)); hipMemset(output_amount,0,100*sizeof(int)); hipMemset(output_x,0,100*sizeof(int)); hipMemset(output_y,0,100*sizeof(int)); hipLaunchKernelGGL(( projection_kernel), dim3(Grid), dim3(threadsPerBlock) , 0, 0, d_cloud,aa,histo_x,histo_y,histo_z,d_table); hipDeviceSynchronize(); 
hipLaunchKernelGGL(( object_label), dim3(Grid_), dim3(threadsPerBlock) , 0, 0, pj, histo_x, histo_y, histo_z); hipDeviceSynchronize(); hipLaunchKernelGGL(( Gaussian1D_kernel), dim3(Grid_), dim3(threadsPerBlock) , 0, 0, pj, d_kernel, kernel_size, result_); hipLaunchKernelGGL(( Gaussian1D_kernel_), dim3(Grid_), dim3(threadsPerBlock) , 0, 0, result_, d_kernel, kernel_size, pj, label); hipDeviceSynchronize(); for (int nh=0;nh<3;nh++) { hipLaunchKernelGGL(( eight_DLS_last_obstacle), dim3(Grid_), dim3(threadsPerBlock) , 0, 0, pj,label,400,240); } hipDeviceSynchronize(); int *output=new int[100]; int *amount=new int[100]; int *x_amount=new int[100]; int *y_amount=new int[100]; hipLaunchKernelGGL(( computer_center), dim3(Grid1), dim3(threadsPerBlock1) , 0, 0, label,d_index,d_t_index,d_amount,dx_amount,dy_amount); hipDeviceSynchronize(); hipLaunchKernelGGL(( computer_center1), dim3(Grid1), dim3(threadsPerBlock1) , 0, 0, d_output,d_index,d_t_index,d_amount,dx_amount,dy_amount,output_amount,output_x,output_y); hipDeviceSynchronize(); hipMemcpy(output,d_output,100*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(amount,output_amount,100*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(x_amount,output_x,100*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(y_amount,output_y,100*sizeof(int),hipMemcpyDeviceToHost); int nu=0; vector<int> center_index, center_amount, center_x, center_y; if (output[99]>0) { center_index.push_back(output[0]); center_amount.push_back(amount[0]); center_x.push_back(x_amount[0]); center_y.push_back(y_amount[0]); for (int g=1;g<output[99];g++) { int numb=0; for (int l=0;l<center_index.size();l++) { if (output[g]==center_index.at(l)) { center_amount.at(l)+=amount[g]; center_x.at(l)+=x_amount[g]; center_y.at(l)+=y_amount[g]; break; } else { numb++; continue; } } if (numb==center_index.size()) { center_index.push_back(output[g]); center_amount.push_back(amount[g]); center_x.push_back(x_amount[g]); center_y.push_back(y_amount[g]); } } for (int 
hq=0;hq<center_index.size();hq++) { if (center_amount.at(hq)==0) continue; float x = center_x.at(hq)/(center_amount.at(hq))*50+(-10000); float y = center_y.at(hq)/(center_amount.at(hq))*50; float value=-0.02181*y+178.7; if (center_amount.at(hq)>4 && center_amount.at(hq)>0.25*value) { obstacle_position[2*nu+0]=x; obstacle_position[2*nu+1]=y; nu++; } } } hipFree(d_index); hipFree(d_t_index); hipFree(d_amount); hipFree(dx_amount); hipFree(dy_amount); hipFree(d_output); hipFree(output_amount); hipFree(output_x); hipFree(output_y); free(output); free(amount); free(x_amount); free(y_amount); return (nu); }
5014aefe6239b9302ed81b7f79e0d2063cb4bd19.cu
#include <cuda.h> #include <cuda_runtime.h> #include "device_functions.h" #include "produce_pc_kernel.h" #include <stdio.h> #include <iostream> #include <fstream> #include <cstdlib> #ifndef __CUDACC__ #define __CUDACC__ #endif #include "device_launch_parameters.h" #include <cuda_runtime_api.h> #include <string.h> // memcpy #include <cstdlib> #include <signal.h> #include <time.h> #include <math.h> #include <thrust/device_ptr.h> #include <algorithm> #include <vector> #include <iostream> #include <thrust/sort.h> #include <thrust/device_vector.h> //#include "Protonect.h" //#include <af/cuda.h> //#include <arrayfire.h> #include <opencv2/opencv.hpp> #include <LaterMethods.h> #define PI 3.1415 using namespace std; texture<int, 1, cudaReadModeElementType> texref0, texref2; texture<float, 1, cudaReadModeElementType> texref1; const size_t imageSize1 = 4 * 424 * 512; dim3 threadsPerBlock(32, 32); dim3 threadsPerBlock1(1, 400); dim3 Grid1((400+threadsPerBlock1.x-1)/threadsPerBlock1.x, (400+threadsPerBlock1.y-1)/threadsPerBlock1.y); dim3 Grid_((400+threadsPerBlock.x-1)/threadsPerBlock.x, (240+threadsPerBlock.y-1)/threadsPerBlock.y); dim3 Grid((512+threadsPerBlock.x-1)/threadsPerBlock.x, (424+threadsPerBlock.y-1)/threadsPerBlock.y); struct is_zero { __host__ __device__ bool operator()(const int x) { return (x==0); } }; __global__ void peng_zhang(unsigned char * pj,unsigned char *temp_pj,size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int j = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int idex = (i) + (j) * width;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * width, (i) + (j-1) * width,(i+1) + (j-1) * width,(i-1) + (j) * width, (i+1) + (j) * width,(i-1) + (j+1) * width,(i) + (j+1) * width,(i+1) + (j+1) * width}; if ( j < height ) { if ( i < width) { if (temp_pj[idex]==0)//目标像素 { if (j==0)//上边界 { if(i==0) { if ((temp_pj[neighbors[4]]>0)||(temp_pj[neighbors[6]]>0)||(temp_pj[neighbors[7]]>0))//周围有不是黄色像素的 { pj[idex]=1; } } 
if(i==width-1) { if ((temp_pj[neighbors[3]]>0)||(temp_pj[neighbors[5]]>0)||(temp_pj[neighbors[6]]>0))//周围有不是黄色像素的 { pj[idex]=1; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=3;k<8;k++) { if (temp_pj[neighbors[k]]>0) { num_++; if (num_>2) { pj[idex]=1; break; } } } } } if (j==(height-1))//上边界 { if(i==0) { if ((temp_pj[neighbors[1]]>0)||(temp_pj[neighbors[2]]>0)||(temp_pj[neighbors[4]]>0))//周围有不是黄色像素的 { pj[idex]=1; } } if(i==width-1) { if ((temp_pj[neighbors[0]]>0)||(temp_pj[neighbors[1]]>0)||(temp_pj[neighbors[3]]>0))//周围有不是黄色像素的 { pj[idex]=1; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=0;k<5;k++) { if (temp_pj[neighbors[k]]>0) { num_++; if (num_>2) { pj[idex]=1; break; } } } } } if ((j>0)&&(j<(height-1)))//上边界 { if(i==0) { if ((temp_pj[neighbors[1]]>0)||(temp_pj[neighbors[2]]>0)||(temp_pj[neighbors[4]]>0)||(temp_pj[neighbors[6]]>0)||(temp_pj[neighbors[7]]>0))//周围有不是黄色像素的 { pj[idex]=1; } } if(i==width-1) { if ((temp_pj[neighbors[0]]>0)||(temp_pj[neighbors[1]]>0)||(temp_pj[neighbors[3]]>0)||(temp_pj[neighbors[5]]>0)||(temp_pj[neighbors[6]]>0))//周围有不是黄色像素的 { pj[idex]=1; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=0;k<8;k++) { if (temp_pj[neighbors[k]]>0) { num_++; if (num_>2) { pj[idex]=1; break; } } } } } } }//for i=512 }//for j=424 } __global__ void fu_shi(unsigned char * pj, unsigned char * temp_pj, size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int j = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int idex = (i) + (j) * width;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * width, (i) + (j-1) * width,(i+1) + (j-1) * width,(i-1) + (j) * width, (i+1) + (j) * width,(i-1) + (j+1) * width,(i) + (j+1) * width,(i+1) + (j+1) * width}; if ( j < height ) { if ( i < width) { if (temp_pj[idex]>0)//目标像素 { if (j==0)//上边界 { if(i==0) { if ((temp_pj[neighbors[4]]==0)||(temp_pj[neighbors[6]]==0)||(temp_pj[neighbors[7]]==0))//周围有不是黄色像素的 { pj[idex]=0; } } if(i==width-1) { if 
((temp_pj[neighbors[3]]==0)||(temp_pj[neighbors[5]]==0)||(temp_pj[neighbors[6]]==0))//周围有不是黄色像素的 { pj[idex]=0; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=3;k<8;k++) { if (temp_pj[neighbors[k]]==0) { num_++; if (num_>2) { pj[idex]=0; break; } } } } } if (j==(height-1))//上边界 { if(i==0) { if ((temp_pj[neighbors[1]]==0)||(temp_pj[neighbors[2]]==0)||(temp_pj[neighbors[4]]==0))//周围有不是黄色像素的 { pj[idex]=0; } } if(i==width-1) { if ((temp_pj[neighbors[0]]==0)||(temp_pj[neighbors[1]]==0)||(temp_pj[neighbors[3]]==0))//周围有不是黄色像素的 { pj[idex]=0; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=0;k<5;k++) { if (temp_pj[neighbors[k]]==0) { num_++; if (num_>2) { pj[idex]=0; break; } } } } } if ((j>0)&&(j<(height-1)))//上边界 { if(i==0) { if ((temp_pj[neighbors[1]]==0)||(temp_pj[neighbors[2]]==0)||(temp_pj[neighbors[4]]==0)||(temp_pj[neighbors[6]]==0)||(temp_pj[neighbors[7]]==0))//周围有不是黄色像素的 { pj[idex]=1; } } if(i==width-1) { if ((temp_pj[neighbors[0]]==0)||(temp_pj[neighbors[1]]==0)||(temp_pj[neighbors[3]]==0)||(temp_pj[neighbors[5]]==0)||(temp_pj[neighbors[6]]==0))//周围有不是黄色像素的 { pj[idex]=0; } } if((i>0) && (i<width-1)) { int num_=0; for (int k=0;k<8;k++) { if (temp_pj[neighbors[k]]==0) { num_++; if (num_>2) { pj[idex]=0; break; } } } } } } }//for i=512 }//for j=424 //腐蚀代码 } __global__ void kernel3(const float* d_dis_depth, const unsigned int* d_raw_rgb, const float* cc, float *_cloud,unsigned char *d_table, unsigned char *seg_result,int *n)//, const float* cc, float *_cloud { int c = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int r = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int index = (c) + (r) * 512;//0--512*424-1=217087 int depth_to_c_off; const float &cx = cc[0]; const float &cy = cc[1]; const float &fx = cc[2]; const float &fy = cc[3]; if ( r < 424 ) { if ( c < 512) { const int &id = tex1Dfetch(texref0,index); const float z = id <0 ? 
0.02f : d_dis_depth[id]; const int c_off = (z>0)*((tex1Dfetch(texref1,index) + __fdividef(52.0f,z)) * 1081.372070f + 960.0f + tex1Dfetch(texref2,index) * 1920); depth_to_c_off = ((z>0)&&(id>=0)&&(c_off>=0)&&(c_off<1920 * 1080)) * (c_off+1) + (-1); // const float &depth_v = d_dis_depth[id] * (id>=0); // _cloud[index*8+0] = (c-cx) * (fx) * depth_v; // _cloud[index*8+1] = (r-cy) * (fy) * depth_v; // _cloud[index*8+2] = depth_v; const float &depth_v = (id>=0) * d_dis_depth[id]; _cloud[index*8+0] = ((depth_v>12000.f)||(depth_v<=1.f))? 100000.0f : ((c-cx) * (fx) * depth_v); _cloud[index*8+1] = ((depth_v>12000.f)||(depth_v<=1.f))? 100000.0f : ((r-cy) * (fy) * depth_v); _cloud[index*8+2] = ((depth_v>12000.f)||(depth_v<=1.f))? 100000.0f : depth_v; const int &rgb_val = depth_to_c_off < 0 ? 0 : d_raw_rgb[depth_to_c_off]; u_char* rgba = (u_char*) ( _cloud + index*8+4 ); const u_char* bgra = (const u_char*) &rgb_val; rgba[0] = bgra[2]; rgba[1] = bgra[1]; rgba[2] = bgra[0]; //颜色分割 seg_result[index]=d_table[rgba[0]/4*64*64 + rgba[1]/4*64 + rgba[2]/4]; if(seg_result[index]==0)//黄色像素 { seg_result[index]=1; atomicAdd(&(n[0]),1); } else seg_result[index]=0; } } __syncthreads(); } void registration_kernel(const float* d_dis_depth, const unsigned int* d_raw_rgb, const int* d_map_dist, const float* d_map_x, const int* d_map_yi, const float* cc, float *_cloud, unsigned char *d_table, unsigned char *seg_result, int *num_yellow)//, const float* cc, float *_cloud { cudaBindTexture(0,texref0,d_map_dist,imageSize1); cudaBindTexture(0,texref1,d_map_x,imageSize1); cudaBindTexture(0,texref2,d_map_yi,imageSize1); int *d_n; cudaMalloc((void**)&d_n,sizeof(int)); cudaMemset(d_n,0,sizeof(int)); kernel3<<< Grid, threadsPerBlock >>>(d_dis_depth, d_raw_rgb, cc, _cloud, d_table, seg_result, d_n); cudaUnbindTexture(texref0); cudaUnbindTexture(texref1); cudaUnbindTexture(texref2); cudaMemcpy(num_yellow,d_n,sizeof(int),cudaMemcpyDeviceToHost); cudaFree(d_n); } //float badpt = 
std::numeric_limits<float>::quiet_NaN (); __global__ void cluster_center_kernel(unsigned char *d_seg_t,unsigned char * segment_table) { int i = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int j = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int idex = (i) + (j) * 512;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * 512, (i) + (j-1) * 512,(i+1) + (j-1) * 512,(i-1) + (j) * 512, (i+1) + (j) * 512,(i-1) + (j+1) * 512,(i) + (j+1) * 512,(i+1) + (j+1) * 512}; if ( j < 424 ) { if ( i < 512) { //膨胀代码 if (segment_table[idex]==0)//黄色像素 { for (int k=0; k<8; k++) { if ((neighbors[k]>0) && (neighbors[k]<512*424) && (segment_table[neighbors[k]]==1))//周围有不是黄色像素的 { d_seg_t[idex]=1; break; } } } }//for i=512 }//for j=424 } __global__ void cluster_center_kernel_ex(unsigned char * d_seg_t,unsigned char * segment_table) { int i = threadIdx.x + blockIdx.x * blockDim.x;//0--511 int j = threadIdx.y + blockIdx.y * blockDim.y;//0-423 const int idex = (i) + (j) * 512;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * 512, (i) + (j-1) * 512,(i+1) + (j-1) * 512,(i-1) + (j) * 512, (i+1) + (j) * 512,(i-1) + (j+1) * 512,(i) + (j+1) * 512,(i+1) + (j+1) * 512}; if ( j < 424 ) { if ( i < 512) { //腐蚀代码 if (segment_table[idex]==1)//黄色像素 { for (int k=0; k<8; k++) { if ((neighbors[k]>0) && (neighbors[k]<512*424) && (segment_table[neighbors[k]]==0))//周围有不是黄色像素的 { d_seg_t[idex]=0; break; } } } }//for i=512 }//for j=424 } __global__ void label_initialize(unsigned char * segment_table, int *d_label, size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 int idex = (i) + (j) * width;//0--512*424-1=217087 if (idex<width*height) { if (segment_table[idex]>0 && i>0) { d_label[idex]=i; // atomicAdd(&(ch1[0]),1); } else d_label[idex]=0; __syncthreads(); } } __global__ void eight_DLS(unsigned char * segment_table, int *d_label, int 
*flag)//flag初始值是0, { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 512;//0--512*424-1=217087 int neighbors[8] = {(i-1) + (j-1) * 512, (i) + (j-1) * 512,(i+1) + (j-1) * 512,(i-1) + (j) * 512, (i+1) + (j) * 512,(i-1) + (j+1) * 512,(i) + (j+1) * 512,(i+1) + (j+1) * 512}; int no_neigbor=0; if (i<512) { if (j<424) { if (segment_table[idex]>0)//object pixel { // atomicAdd(&(ch[0]),1); int mini=d_label[idex]; for (int n=0;n<8;n++)//对8邻域进行搜索 { while ((neighbors[n]>0)&&(neighbors[n]<512*424)&&(segment_table[neighbors[n]]!=0))//在n方向没有遇到0 { //处理某一个方向的pixel if(d_label[neighbors[n]]<mini) { mini=d_label[neighbors[n]]; } switch (n) { case 0://左上角 neighbors[n]=neighbors[n]-512-1; break; case 1://正上方 neighbors[n]=neighbors[n]-512; break; case 2://右上角 neighbors[n]=neighbors[n]-512+1; break; case 3://左上角 neighbors[n]=neighbors[n]-1; break; case 4://左上角 neighbors[n]=neighbors[n]+1; break; case 5://左上角 neighbors[n]=neighbors[n]+512-1; break; case 6://左上角 neighbors[n]=neighbors[n]+512; break; case 7://左上角 neighbors[n]=neighbors[n]+512+1; break; default: break; } } } if (mini<d_label[idex]) { d_label[idex]=mini; flag[0]=1; } } // __syncthreads(); } } } __global__ void eight_DLS_last(unsigned char * segment_table, int *d_label, int *array, int *flag, size_t nnn) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 512;//0--512*424-1=217087 int s_idex=threadIdx.x +blockDim.x*threadIdx.y; int neighbors[8] = {(i-1) + (j-1) * 512, (i) + (j-1) * 512,(i+1) + (j-1) * 512,(i-1) + (j) * 512, (i+1) + (j) * 512,(i-1) + (j+1) * 512,(i) + (j+1) * 512,(i+1) + (j+1) * 512}; // if (idex==0) // printf("aaaaaa %d \n",flag[0]); if (i<512) { if (j<424) { if ((segment_table[idex]>0) && 
(d_label[idex]>0))//object pixel { // if ((nnn>2)&&(d_label[d_label[idex]]==d_label[idex])) // { // } // else // { int mini=d_label[idex]; for (int n=0;n<8;n++)//对8邻域进行搜索 { while ((neighbors[n]>0)&&(neighbors[n]<512*424)&&(segment_table[neighbors[n]]!=0))//在n方向没有遇到0 { //处理某一个方向的pixel if((d_label[neighbors[n]]<mini)&&(d_label[neighbors[n]]>0)) { mini=d_label[neighbors[n]]; } switch (n) { case 0://左上角 neighbors[n]=neighbors[n]-512-1; break; case 1://正上方 neighbors[n]=neighbors[n]-512; break; case 2://右上角 neighbors[n]=neighbors[n]-512+1; break; case 3://左上角 neighbors[n]=neighbors[n]-1; break; case 4://左上角 neighbors[n]=neighbors[n]+1; break; case 5://左上角 neighbors[n]=neighbors[n]+512-1; break; case 6://左上角 neighbors[n]=neighbors[n]+512; break; case 7://左上角 neighbors[n]=neighbors[n]+512+1; break; default: break; } } } if (mini<d_label[idex]) { d_label[idex]=mini; // atomicExch(&(flag[0]),1); } // }//if smalllest CCL }//if object pixel __syncthreads(); __shared__ bool lockx1; __threadfence(); if(s_idex==0) { unsigned int lockiii1=atomicAdd(&(array[5]),1); lockx1=(array[5]==224); } __syncthreads(); if(lockx1)//保证所有的块均计算完了 { } if (nnn==2) { if (d_label[idex]>0) { if ((d_label[idex]!=array[0])&&(d_label[idex]!=array[1])&&(d_label[idex]!=array[2])&&(d_label[idex]!=array[3])&&(d_label[idex]!=array[4])) { for (int k=0;k<5;k++) { if (array[k]==0) { atomicExch(&(array[k]),d_label[idex]); break; } } } } } }//j=424 }//i=512 }//函数域 __global__ void eight_DLS_last_obstacle(unsigned char * segment_table, int *d_label, size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * width;//0--512*424-1=217087 int s_idex=threadIdx.x +blockDim.x*threadIdx.y; int neighbors[8] = {(i-1) + (j-1) * width, (i) + (j-1) * width,(i+1) + (j-1) * width,(i-1) + (j) * width, (i+1) + (j) * width,(i-1) + (j+1) * width,(i) + 
(j+1) * width,(i+1) + (j+1) * width}; // if (idex==0) // printf("aaaaaa %d \n",flag[0]); if (i<width) { if (j<height) { if ((segment_table[idex]>0)&&(d_label[idex]>0))//object pixel { // if ((nnn>2)&&(d_label[d_label[idex]]==d_label[idex])) // { // } // else // { int mini=d_label[idex]; for (int n=0;n<8;n++)//对8邻域进行搜索 { while ((neighbors[n]>0)&&(neighbors[n]<width*height)&&(segment_table[neighbors[n]]!=0))//在n方向没有遇到0 { //处理某一个方向的pixel if((d_label[neighbors[n]]<mini)&&(d_label[neighbors[n]]>0)) { mini=d_label[neighbors[n]]; } switch (n) { case 0://左上角 neighbors[n]=neighbors[n]-width-1; break; case 1://正上方 neighbors[n]=neighbors[n]-width; break; case 2://右上角 neighbors[n]=neighbors[n]-width+1; break; case 3://左上角 neighbors[n]=neighbors[n]-1; break; case 4://左上角 neighbors[n]=neighbors[n]+1; break; case 5://左上角 neighbors[n]=neighbors[n]+width-1; break; case 6://左上角 neighbors[n]=neighbors[n]+width; break; case 7://左上角 neighbors[n]=neighbors[n]+width+1; break; default: break; } } } if (mini<d_label[idex]) { d_label[idex]=mini; // flag[0]=1; } // }//if smalllest CCL }//if object pixel }//j=424 }//i=512 }//函数域 __global__ void count(int *d_label, int numb, int *center, size_t width, size_t height) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * width;//0--512*424-1=217087 if (idex<width*height) { // if (idex==0) // printf("nnnnnnnnnnnnnnnnnn1111 %d %d\n\n",n[0],numb); if (d_label[idex]==numb) { atomicAdd(&(center[0]),1); atomicAdd(&(center[1]),i); atomicAdd(&(center[2]),j); } __syncthreads(); } } double gettime() { double tseconds=0.0; struct timeval mytime; gettimeofday(&mytime,(struct timezone *)0); tseconds=(double)(mytime.tv_sec+mytime.tv_usec*1.0e-6); return tseconds; } int CCL(unsigned char *d_seg_table, int *d_label, float *ball_position) { double ti=gettime(); unsigned char *d_seg_table_temp; 
cudaMalloc((void **)&d_seg_table_temp,512*424*sizeof(unsigned char)); for (int j=0;j<3;j++) {//corrosion cudaMemcpy(d_seg_table_temp,d_seg_table,512*424*sizeof(unsigned char),cudaMemcpyDeviceToDevice); fu_shi<<< Grid, threadsPerBlock >>>(d_seg_table,d_seg_table_temp,512,424); } for (int i=0;i<4;i++) {//dilate cudaMemcpy(d_seg_table_temp,d_seg_table,512*424*sizeof(unsigned char),cudaMemcpyDeviceToDevice); peng_zhang<<< Grid, threadsPerBlock >>>(d_seg_table,d_seg_table_temp,512,424); } cudaDeviceSynchronize(); label_initialize<<< Grid, threadsPerBlock >>>(d_seg_table,d_label,512,424);// int *array; cudaMalloc((void **)&array,6*sizeof(int)); cudaMemset(array,0,6*sizeof(int)); int *h_array; cudaHostAlloc((void **)&h_array,5*sizeof(int),cudaHostAllocDefault); int *d_flag; cudaMalloc((void **)&d_flag,1*sizeof(int)); int h_flag=1; for (size_t nnn=0;nnn<3;nnn++) { eight_DLS_last<<< Grid, threadsPerBlock >>>(d_seg_table,d_label,array,d_flag,nnn); } cudaDeviceSynchronize(); cudaMemcpy(h_array,array,5*sizeof(int),cudaMemcpyDeviceToHost);//如果change=0,说明没有任何改变,则退出循环 int *h_center; cudaHostAlloc((void **)&h_center,3*sizeof(int),cudaHostAllocDefault); int *center; cudaMalloc((void **)&center,3*sizeof(int)); int nu=0; for (int h1=0;h1<5;h1++) { if (h_array[h1]>0) { cudaMemset(center,0,3*sizeof(int)); count<<< Grid, threadsPerBlock >>>(d_label,h_array[h1],center,512,424); cudaDeviceSynchronize(); cudaMemcpy(h_center,center,3*sizeof(int),cudaMemcpyDeviceToHost); if (h_center[0]<20)//如果像素点很少的话忽略这个区域 { h_array[h1]=0; } else { cudaMemcpy(h_center,center,2*sizeof(int),cudaMemcpyDeviceToHost); ball_position[3*nu+0]=h_center[1]/(h_center[0]); ball_position[3*nu+1]=h_center[2]/(h_center[0]); ball_position[3*nu+2]=h_center[0]; nu=nu+1; } } } cudaDeviceSynchronize(); cudaFree(d_flag); cudaFreeHost(h_array); cudaFree(center); cudaFreeHost(h_center); cudaFree(array); cudaFree(d_seg_table_temp); return (nu); } __global__ void projection_kernel(float *d_cloud, float *coeffi, int *histo_x, int 
*histo_y, int *histo_z,unsigned char *d_table) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 512;//0--512*424-1=217087 __shared__ float coe[12]; if (threadIdx.x<12) coe[threadIdx.x]=coeffi[threadIdx.x]; if (idex<512*424) { // if ((d_cloud[idex*8+0]>-6000)&&(d_cloud[idex*8+0]<6000)&&(d_cloud[idex*8+1]>-4000)&&(d_cloud[idex*8+1]<2000)&&(d_cloud[idex*8+2]>500)&&(d_cloud[idex*8+2]<9000))//排除无效点干扰 // { u_char* rgba = (u_char*) ( d_cloud + idex*8+4 ); //颜色分割 unsigned char re; re=d_table[rgba[0]/4*64*64 + rgba[1]/4*64 + rgba[2]/4]; if(re!=0)//黄色像素 { // float xj=d_cloud[idex*8+0]; // float yj=d_cloud[idex*8+1]; // float zj=d_cloud[idex*8+2];//kinect坐标系 // double distance_to_plane = coeffi[0]*xj + (coeffi[1])*yj + (coeffi[2])*zj + coeffi[3]*1000; float xr= coe[0]*d_cloud[idex*8+0]+coe[1]*d_cloud[idex*8+1]+coe[2]*d_cloud[idex*8+2]+coe[3]*1000;//直接转换到机器人坐标系 float yr= coe[4]*d_cloud[idex*8+0]+coe[5]*d_cloud[idex*8+1]+coe[6]*d_cloud[idex*8+2]+coe[7]*1000; float zr= coe[8]*d_cloud[idex*8+0]+coe[9]*d_cloud[idex*8+1]+coe[10]*d_cloud[idex*8+2]+coe[11]*1000; //因为相机是向下倾斜的,为了垂直投影到地平面,X方向没差别,Z需要由相机的光轴方向变换到水平方向 int x = (int)((yr+10000)/50); int z = (int)((xr)/50); int w=z*400+x; if ((zr>100)&&(zr<300))//&&(distance_to_plane<1) { atomicAdd(&(histo_z[w]),1); } if ((zr>240)&&(zr<860))//&&(distance_to_plane<1) { atomicAdd(&(histo_x[w]),1); // printf("%f %f ",zj_,xr); } if ((zr>1900)&&(zr<2500))//&&(distance_to_plane<1) { atomicAdd(&(histo_y[w]),1); } } // } } } //__device__ void kernel_up(int &loc, float *temp_pj) //{ //} //__device__ int *location; __global__ void object_label(unsigned char *pj, int *histo_x, int *histo_y, int *histo_z) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const 
int idex = (i) + (j) * 400;//0--512*424-1=217087 int histx=histo_x[idex]; int histy=histo_y[idex]; int histz=histo_z[idex]; if (idex<400*240) { // int z_thresh= if (idex<16000)//0-2m { pj[idex]=(histz>90)&&(histx>10)&&(histy<1)? (histx+histz):0; } if ((idex>16000)&&(idex<24000))//2-3m { pj[idex]=(histz>60)&&(histx>2)&&(histy<1)? (histx+histz):0; } if ((idex<28000)&&(idex>24000))//3.0-3.5m { pj[idex]=(histz>30)&&(histx>2)&&(histy<1)? (histx+histz):0; } if ((idex<32000)&&(idex>28000))//3.5-4m { pj[idex]=(histz>20)&&(histx>1)&&(histy<1)? (histx+histz):0; } if ((idex<40000)&&(idex>32000))//4-5m { pj[idex]=(histz>5)&&(histx<50)&&(histy<1)? (histx+histz):0; } if ((idex<44000)&&(idex>40000))//5-5.5m { pj[idex]=(histz>3)&&(histx<30)&&(histy<1)? (histx+histz):0; } if ((idex<56000)&&(idex>44000))//5.5-7m { pj[idex]=(histz>4)&&(histx<20)&&(histy<1)? (histx+histz):0; } // if((idex>=56000)&&(idex<60000)) // { // pj[idex]=0; // } // if ((histo_z[idex]>2)&&(histo_x[idex]>0)&&(histo_y[idex]<2))//之前判断z大于多少的貌似也可以 // { // pj[idex]=histo_x[idex]+histo_z[idex]+histo_y[idex]; // } // else // { // pj[idex]=0; // } // pj[idex]=(histo_z[idex]>2)&&(histo_x[idex]>0)&&(histo_y[idex]<10)? 
(histo_x[idex]+histo_z[idex]+histo_y[idex]) : 0; } } __global__ void Gaussian1D_kernel(unsigned char *pj, double *kernel, size_t size, unsigned char *result) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 if (idex<400*240) { int kCenter = floor(size/2.0); int nn; //y 先处理 for (int n = 0; n < size; n++) { nn = size - 1 - n; int posx = i + (n - kCenter); int posy = j; int w=posx*400+posy; if(posx >= 0 && posx < 400) { result[idex] += pj[w]*kernel[nn]; } } } } __global__ void Gaussian1D_kernel_(unsigned char *result, double *kernel, size_t size, unsigned char *pj, int *d_label) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 if (idex<400*240) { int kCenter = floor(size/2.0); int nn; if ( (j>9) && (j<230)) { for (int n = 0; n < size; n++) { nn = size - 1 - n; int posx = j; int posy = i + (n - kCenter); int w=posy*400+posx; if(posy >= 0 && posy < 240) { pj[idex] += result[w]*kernel[nn]; } } //x//后处理 if ((pj[idex]>0)&&(i>0)) { d_label[idex]=i; // atomicAdd(&(ch1[0]),1); } } } } __global__ void compute_array(int *label, int *array1) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 if (idex<400*240) { if (label[idex]>0) { if ((label[idex]!=array1[0])&&(label[idex]!=array1[1])&&(label[idex]!=array1[2])&&(label[idex]!=array1[3])&&(label[idex]!=array1[4])&&(label[idex]!=array1[5])&&(label[idex]!=array1[6])&&(label[idex]!=array1[7])&&(label[idex]!=array1[8])&&(label[idex]!=array1[9])) { for (int k=0;k<10;k++) { if 
(array1[k]==0) { atomicExch(&(array1[k]),label[idex]); break; } } } } } } __global__ void computer_center(int *d_label,int *index,int *t_index,int *amount,int *i_amount,int *j_amount) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 const int sidex = threadIdx.y; __shared__ int region_index[1]; __shared__ int pixel_amount[1]; __shared__ int x_amount[1]; __shared__ int y_amount[1]; __shared__ unsigned char s_label[192]; __shared__ int flag[1]; region_index[0]=0; pixel_amount[0]=0; x_amount[0]=0; y_amount[0]=0; flag[0]=0; if (sidex<192) { s_label[sidex]=d_label[idex]; __syncthreads(); if (s_label[sidex]>0) { region_index[0]=s_label[sidex]; atomicAdd(&(pixel_amount[0]),1); atomicAdd(&(x_amount[0]),i); atomicAdd(&(y_amount[0]),j); flag[0]=1; } } __syncthreads(); index[blockIdx.x]=flag[0];//0 1 0 1 1 0 0 0 1 1 1为了方便进行求前缀和 t_index[blockIdx.x]=region_index[0]; amount[blockIdx.x]=pixel_amount[0]; i_amount[blockIdx.x]=x_amount[0]; j_amount[blockIdx.x]=y_amount[0]; __syncthreads(); } __global__ void computer_center1(int *output,int *index,int *t_index,int *amount,int *i_amount,int *j_amount,int *output_amount,int *output_x,int *output_y) { int i = threadIdx.x + blockIdx.x * blockDim.x;//blockIdx.x=1,blockIdx.y=424,blockDim.x=512,blockDim.y=1 int j = threadIdx.y + blockIdx.y * blockDim.y;//threadIdx.x=512,threadIdx.y=1 const int idex = (i) + (j) * 400;//0--512*424-1=217087 const int sidex = threadIdx.y; int space=1; //可能可以用动态并行做 if(blockIdx.x==0) { int tt=0; // printf("%d ",index[sidex]);//到这儿的时候并不是所有块都完成了前面的index赋值,所以得到的数据不完整 for (int d=1;d<=400;d=d*2) { int temp=index[sidex]; int neighbor=0; if ((sidex-space>0)) neighbor=index[sidex-space]; __syncthreads(); if (sidex<space) continue; else tt=temp+neighbor;//index的改变导致neighbor读取的错误 space=space*2; __syncthreads(); index[sidex]=tt; } } 
__syncthreads(); // //index的最后一个数是代表有多少个列有目标值 if (blockIdx.x==0) { // int current=sidex; // int later=current+1; //index这一步没问题。得到的index[399]个数也是对的,下面的判断条件有误 if (sidex<399)//only use the even(偶数) threads, and the position in index is odd(奇数). { if (index[sidex+1]==(index[sidex]+1)) { // printf("even %d %d \n",index[sidex],index[sidex+1]); output[index[sidex]]=t_index[sidex+1]; output_amount[index[sidex]]=amount[sidex+1]; output_x[index[sidex]]=i_amount[sidex+1]; output_y[index[sidex]]=j_amount[sidex+1]; } } } __syncthreads(); output[99]=index[399];//index最后一个数放的是最终有数值的块的个数 } int project2D(float *d_cloud, unsigned char *pj, float *aa,float *obstacle_position,unsigned char *d_table,int *histo_x,int *histo_y,int *histo_z,double *d_kernel,size_t kernel_size,unsigned char *result_, int *label, int *h_label) { int *d_index,*d_t_index,*d_amount,*dx_amount,*dy_amount,*d_output; int *output_amount,*output_x, *output_y; cudaMalloc((void **)&d_index,400*sizeof(int)); cudaMalloc((void **)&d_t_index,400*sizeof(int)); cudaMalloc((void **)&d_amount,400*sizeof(int)); cudaMalloc((void **)&dx_amount,400*sizeof(int)); cudaMalloc((void **)&dy_amount,400*sizeof(int)); cudaMalloc((void **)&d_output,100*sizeof(int)); cudaMalloc((void **)&output_amount,100*sizeof(int)); cudaMalloc((void **)&output_x,100*sizeof(int)); cudaMalloc((void **)&output_y,100*sizeof(int)); cudaMemset(histo_x,0,400*240*sizeof(int)); cudaMemset(histo_y,0,400*240*sizeof(int)); cudaMemset(histo_z,0,400*240*sizeof(int)); cudaMemset(result_,0,400*240*sizeof(unsigned char)); cudaMemset(label,0,400*240*sizeof(int)); cudaMemset(h_label,0,400*240*sizeof(int)); cudaMemset(d_amount,0,400*sizeof(int)); cudaMemset(dx_amount,0,400*sizeof(int)); cudaMemset(dy_amount,0,400*sizeof(int)); cudaMemset(d_index,0,400*sizeof(int)); cudaMemset(d_t_index,0,400*sizeof(int)); cudaMemset(d_output,0,100*sizeof(int)); cudaMemset(output_amount,0,100*sizeof(int)); cudaMemset(output_x,0,100*sizeof(int)); cudaMemset(output_y,0,100*sizeof(int)); 
projection_kernel<<< Grid, threadsPerBlock >>>(d_cloud,aa,histo_x,histo_y,histo_z,d_table); cudaDeviceSynchronize(); object_label<<< Grid_, threadsPerBlock >>>(pj, histo_x, histo_y, histo_z); cudaDeviceSynchronize(); Gaussian1D_kernel<<< Grid_, threadsPerBlock >>>(pj, d_kernel, kernel_size, result_); Gaussian1D_kernel_<<< Grid_, threadsPerBlock >>>(result_, d_kernel, kernel_size, pj, label); cudaDeviceSynchronize(); for (int nh=0;nh<3;nh++) { eight_DLS_last_obstacle<<< Grid_, threadsPerBlock >>>(pj,label,400,240); } cudaDeviceSynchronize(); int *output=new int[100]; int *amount=new int[100]; int *x_amount=new int[100]; int *y_amount=new int[100]; computer_center<<< Grid1, threadsPerBlock1 >>>(label,d_index,d_t_index,d_amount,dx_amount,dy_amount); cudaDeviceSynchronize(); computer_center1<<< Grid1, threadsPerBlock1 >>>(d_output,d_index,d_t_index,d_amount,dx_amount,dy_amount,output_amount,output_x,output_y); cudaDeviceSynchronize(); cudaMemcpy(output,d_output,100*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(amount,output_amount,100*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(x_amount,output_x,100*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(y_amount,output_y,100*sizeof(int),cudaMemcpyDeviceToHost); int nu=0; vector<int> center_index, center_amount, center_x, center_y; if (output[99]>0) { center_index.push_back(output[0]); center_amount.push_back(amount[0]); center_x.push_back(x_amount[0]); center_y.push_back(y_amount[0]); for (int g=1;g<output[99];g++) { int numb=0; for (int l=0;l<center_index.size();l++) { if (output[g]==center_index.at(l)) { center_amount.at(l)+=amount[g]; center_x.at(l)+=x_amount[g]; center_y.at(l)+=y_amount[g]; break; } else { numb++; continue; } } if (numb==center_index.size()) { center_index.push_back(output[g]); center_amount.push_back(amount[g]); center_x.push_back(x_amount[g]); center_y.push_back(y_amount[g]); } } for (int hq=0;hq<center_index.size();hq++) { if (center_amount.at(hq)==0) continue; float x = 
center_x.at(hq)/(center_amount.at(hq))*50+(-10000); float y = center_y.at(hq)/(center_amount.at(hq))*50; float value=-0.02181*y+178.7; if (center_amount.at(hq)>4 && center_amount.at(hq)>0.25*value) { obstacle_position[2*nu+0]=x; obstacle_position[2*nu+1]=y; nu++; } } } cudaFree(d_index); cudaFree(d_t_index); cudaFree(d_amount); cudaFree(dx_amount); cudaFree(dy_amount); cudaFree(d_output); cudaFree(output_amount); cudaFree(output_x); cudaFree(output_y); free(output); free(amount); free(x_amount); free(y_amount); return (nu); }
dd09f8a8c2b42190799feb6f72cbd5bfa70a1456.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <helper_timer.h> #include <hip/hip_runtime_api.h> #define RESULT_VERIFICATION 0 // change 1 if you want to verify the result #define BLOCK_DIM 16 //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on GPU //! C = alpha * A * B + beta * C //! @param A matrix A as provided to device (M x K) //! @param B matrix B as provided to device (K x N) //! @param C matrix C as provided to device (M x N) //! @param N height of matrix A and matrix C //! @param M width of matrix B and matrix C //! @param K width of matrix A and height of matrix C //! @param alpha scala value for matrix multiplication //! @param beta scala value for matrix summation with C //////////////////////////////////////////////////////////////////////////////// __global__ void sgemm_kernel(const float *A, const float *B, float *C, int M, int N, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float element_c = 0.f; for (int e = 0; e < K; e++) element_c += A[row * K + e] * B[e * K + col]; C[row * N + col] = alpha * element_c + beta * C[row * N + col]; } __global__ void sgemm_kernel_v2(const float *A, const float *B, float *C, int M, int N, int K, float alpha, float beta) { int bid_x = blockIdx.x * blockDim.x; int bid_y = blockIdx.y * blockDim.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; float element_c = 0.f; __shared__ float s_tile_A[BLOCK_DIM][BLOCK_DIM]; __shared__ float s_tile_B[BLOCK_DIM][BLOCK_DIM]; // forward tile with tile size in matrix A for (int k = 0; k < K; k += BLOCK_DIM) { s_tile_A[tid_y][tid_x] = A[ (bid_y + tid_y) * K + tid_x + k ]; // Get sub-matrix from A s_tile_B[tid_y][tid_x] = B[ (k*BLOCK_DIM + tid_y) * N + bid_x + tid_x ]; // Get sub-matrix from B __syncthreads(); // compute gemm operation with tiles for (int e = 0; e < BLOCK_DIM; e++) 
element_c += s_tile_A[tid_y][e] * s_tile_B[e][tid_x]; __syncthreads(); } C[(bid_y + tid_y) * N + (bid_x + tid_x)] = \ alpha * element_c + beta * C[(bid_y + tid_y) * N + (bid_x + tid_x)]; } void sgemm_gold(const float *A, const float *B, float *C, int M, int N, int K, float alpha, float beta) { for (int row = 0; row < M; row++) { for (int col = 0; col < N; col++) { float element_c = 0.f; for (int e = 0; e < K; e++) { element_c += A[row * K + e] * B[e * N + col]; } C[row * N + col] = alpha * element_c + beta * C[row * N + col]; } } } void random_init(float *data, int length) { for (int i = 0; i < length; i++) { data[i] = (rand() & 0xFFFF) / (float)RAND_MAX; } } bool value_test(float *a, float *b, int length) { float epsilon = 0.000001; for (int i = 0; i < length; i++) if (abs(a[i] - b[i]) >= epsilon) return false; return true; } int main(int c, char *argv[]) { float *A, *B, *C_host, *C_gpu; float *d_A, *d_B, *d_C; int M, N, K; float alpha = 2.f; float beta = 1.f; int n_iter = 1; N = M = K = 2048; // initialize timer StopWatchInterface *timer; sdkCreateTimer(&timer); // allocation of linear memory space A = (float *)malloc(M * K * sizeof(float)); B = (float *)malloc(K * N * sizeof(float)); C_host = (float *)malloc(M * N * sizeof(float)); C_gpu = (float *)malloc(M * N * sizeof(float)); // allocation of gpu linear memory space hipMalloc((void **)&d_A, M * K * sizeof(float)); hipMalloc((void **)&d_B, K * N * sizeof(float)); hipMalloc((void **)&d_C, M * N * sizeof(float)); // initialize randomized values for memory space random_init(A, M * K); random_init(B, K * N); // profiler will focus from this point sdkStartTimer(&timer); // copy initial value for gpu memory hipMemcpy(d_A, A, M * K * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_B, A, K * N * sizeof(float), hipMemcpyHostToDevice); // do operation dim3 blockDim(BLOCK_DIM, BLOCK_DIM); dim3 gridDim((N + BLOCK_DIM - 1) / BLOCK_DIM, (M + BLOCK_DIM - 1) / BLOCK_DIM); hipProfilerStart(); for (int i = 0; i < n_iter; 
i++) { hipLaunchKernelGGL(( sgemm_kernel), dim3(gridDim), dim3(blockDim), 0, 0, d_A, d_B, d_C, M, N, K, alpha, beta); } for (int i = 0; i < n_iter; i++) { hipLaunchKernelGGL(( sgemm_kernel_v2), dim3(gridDim), dim3(blockDim), 0, 0, d_A, d_B, d_C, M, N, K, alpha, beta); } // profiler will stop its focus hipProfilerStop(); // measuring the performance hipDeviceSynchronize(); sdkStopTimer(&timer); // this profiler should be behined of device synchronization #if (RESULT_VERIFICATION) // copy data from the gpu hipMemcpy(C_gpu, d_C, M * N * sizeof(float), hipMemcpyDeviceToHost); // compare the result sgemm_gold(A, B, C_host, M, N, K, alpha, beta); if (value_test(C_host, C_gpu, M * N)) printf("SUCCESS!!\n"); else printf("Error\n"); #endif // terminates allocated gpu memory space hipFree(d_A); hipFree(d_B); hipFree(d_C); // terminates allocated memory space free(A); free(B); free(C_host); free(C_gpu); return 0; }
dd09f8a8c2b42190799feb6f72cbd5bfa70a1456.cu
#include <stdio.h> #include <helper_timer.h> #include <cuda_profiler_api.h> #define RESULT_VERIFICATION 0 // change 1 if you want to verify the result #define BLOCK_DIM 16 //////////////////////////////////////////////////////////////////////////////// //! Compute reference data set matrix multiply on GPU //! C = alpha * A * B + beta * C //! @param A matrix A as provided to device (M x K) //! @param B matrix B as provided to device (K x N) //! @param C matrix C as provided to device (M x N) //! @param N height of matrix A and matrix C //! @param M width of matrix B and matrix C //! @param K width of matrix A and height of matrix C //! @param alpha scala value for matrix multiplication //! @param beta scala value for matrix summation with C //////////////////////////////////////////////////////////////////////////////// __global__ void sgemm_kernel(const float *A, const float *B, float *C, int M, int N, int K, float alpha, float beta) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float element_c = 0.f; for (int e = 0; e < K; e++) element_c += A[row * K + e] * B[e * K + col]; C[row * N + col] = alpha * element_c + beta * C[row * N + col]; } __global__ void sgemm_kernel_v2(const float *A, const float *B, float *C, int M, int N, int K, float alpha, float beta) { int bid_x = blockIdx.x * blockDim.x; int bid_y = blockIdx.y * blockDim.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; float element_c = 0.f; __shared__ float s_tile_A[BLOCK_DIM][BLOCK_DIM]; __shared__ float s_tile_B[BLOCK_DIM][BLOCK_DIM]; // forward tile with tile size in matrix A for (int k = 0; k < K; k += BLOCK_DIM) { s_tile_A[tid_y][tid_x] = A[ (bid_y + tid_y) * K + tid_x + k ]; // Get sub-matrix from A s_tile_B[tid_y][tid_x] = B[ (k*BLOCK_DIM + tid_y) * N + bid_x + tid_x ]; // Get sub-matrix from B __syncthreads(); // compute gemm operation with tiles for (int e = 0; e < BLOCK_DIM; e++) element_c += s_tile_A[tid_y][e] * s_tile_B[e][tid_x]; 
__syncthreads(); } C[(bid_y + tid_y) * N + (bid_x + tid_x)] = \ alpha * element_c + beta * C[(bid_y + tid_y) * N + (bid_x + tid_x)]; } void sgemm_gold(const float *A, const float *B, float *C, int M, int N, int K, float alpha, float beta) { for (int row = 0; row < M; row++) { for (int col = 0; col < N; col++) { float element_c = 0.f; for (int e = 0; e < K; e++) { element_c += A[row * K + e] * B[e * N + col]; } C[row * N + col] = alpha * element_c + beta * C[row * N + col]; } } } void random_init(float *data, int length) { for (int i = 0; i < length; i++) { data[i] = (rand() & 0xFFFF) / (float)RAND_MAX; } } bool value_test(float *a, float *b, int length) { float epsilon = 0.000001; for (int i = 0; i < length; i++) if (abs(a[i] - b[i]) >= epsilon) return false; return true; } int main(int c, char *argv[]) { float *A, *B, *C_host, *C_gpu; float *d_A, *d_B, *d_C; int M, N, K; float alpha = 2.f; float beta = 1.f; int n_iter = 1; N = M = K = 2048; // initialize timer StopWatchInterface *timer; sdkCreateTimer(&timer); // allocation of linear memory space A = (float *)malloc(M * K * sizeof(float)); B = (float *)malloc(K * N * sizeof(float)); C_host = (float *)malloc(M * N * sizeof(float)); C_gpu = (float *)malloc(M * N * sizeof(float)); // allocation of gpu linear memory space cudaMalloc((void **)&d_A, M * K * sizeof(float)); cudaMalloc((void **)&d_B, K * N * sizeof(float)); cudaMalloc((void **)&d_C, M * N * sizeof(float)); // initialize randomized values for memory space random_init(A, M * K); random_init(B, K * N); // profiler will focus from this point sdkStartTimer(&timer); // copy initial value for gpu memory cudaMemcpy(d_A, A, M * K * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_B, A, K * N * sizeof(float), cudaMemcpyHostToDevice); // do operation dim3 blockDim(BLOCK_DIM, BLOCK_DIM); dim3 gridDim((N + BLOCK_DIM - 1) / BLOCK_DIM, (M + BLOCK_DIM - 1) / BLOCK_DIM); cudaProfilerStart(); for (int i = 0; i < n_iter; i++) { sgemm_kernel<<<gridDim, blockDim>>>(d_A, 
d_B, d_C, M, N, K, alpha, beta); } for (int i = 0; i < n_iter; i++) { sgemm_kernel_v2<<<gridDim, blockDim>>>(d_A, d_B, d_C, M, N, K, alpha, beta); } // profiler will stop its focus cudaProfilerStop(); // measuring the performance cudaDeviceSynchronize(); sdkStopTimer(&timer); // this profiler should be behined of device synchronization #if (RESULT_VERIFICATION) // copy data from the gpu cudaMemcpy(C_gpu, d_C, M * N * sizeof(float), cudaMemcpyDeviceToHost); // compare the result sgemm_gold(A, B, C_host, M, N, K, alpha, beta); if (value_test(C_host, C_gpu, M * N)) printf("SUCCESS!!\n"); else printf("Error\n"); #endif // terminates allocated gpu memory space cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // terminates allocated memory space free(A); free(B); free(C_host); free(C_gpu); return 0; }
c424a10f226406e1df60438a81b52394c69b88b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * Addition on the Device: add() * */ #include <iostream> #include <math.h> __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } //main int main(void){ int a, b, c; //host copies of a,b,c int *d_a, *d_b, *d_c; //device copies of a,b,c int size = sizeof(int); //allocating space for device copies of a,b,c hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMalloc((void **)&d_c, size); //setup input values: initializing our input data a = 2; b = 7; //copy inputs to device hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice); //launch add() kernel in GPU: pass arrgument like a normal function hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c); //copy result back to host hipMemcpy (&c, d_c, size, hipMemcpyDeviceToHost); //cleanup hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
c424a10f226406e1df60438a81b52394c69b88b7.cu
/* * * Addition on the Device: add() * */ #include <iostream> #include <math.h> __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } //main int main(void){ int a, b, c; //host copies of a,b,c int *d_a, *d_b, *d_c; //device copies of a,b,c int size = sizeof(int); //allocating space for device copies of a,b,c cudaMalloc((void **)&d_a, size); cudaMalloc((void **)&d_b, size); cudaMalloc((void **)&d_c, size); //setup input values: initializing our input data a = 2; b = 7; //copy inputs to device cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice); //launch add() kernel in GPU: pass arrgument like a normal function add<<<1,1>>>(d_a, d_b, d_c); //copy result back to host cudaMemcpy (&c, d_c, size, cudaMemcpyDeviceToHost); //cleanup cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
351a0b862944d4d2e52a3dcfb146453b34cbbea2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } }
351a0b862944d4d2e52a3dcfb146453b34cbbea2.cu
#include "includes.h" __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){ float x1=xyz1[(i*n+j)*3+0]; float y1=xyz1[(i*n+j)*3+1]; float z1=xyz1[(i*n+j)*3+2]; int j2=idx1[i*n+j]; float x2=xyz2[(i*m+j2)*3+0]; float y2=xyz2[(i*m+j2)*3+1]; float z2=xyz2[(i*m+j2)*3+2]; float g=grad_dist1[i*n+j]*2; atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2)); atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2)); atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2))); atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2))); } } }
8c6b1df0462b0443196fa753e63b76a651c687a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utilities.cuh" #include "DD3_GPU_proj.h" #define VAMDSN3409UOJIFK 32 #define KLZSDHNQ04JAIO 8 #define SKLJDFNGHW9835GJK35TG 1 template<typename VNKJH5IORTN, typename VNJEG9485BNIJLIHNW3828934> __global__ void NKCMVN934Q0UFK9340Q345(VNKJH5IORTN* MCVBKLNFGEO7UDNK92034, VNJEG9485BNIJLIHNW3828934* out_ZXY, VNJEG9485BNIJLIHNW3828934* VHNOREH9384HRGUAIQ121, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2) { int idz = threadIdx.x + blockIdx.x * blockDim.x; int idx = threadIdx.y + blockIdx.y * blockDim.y; int idy = threadIdx.z + blockIdx.z * blockDim.z; if(idx < CMNOI4BH3958BTUJ23 && idy < QAIWLRUW4W && idz < VMN0348HGRO2) { int i = (idy * CMNOI4BH3958BTUJ23 + idx) * VMN0348HGRO2 + idz; int ni = (idy * (CMNOI4BH3958BTUJ23 + 1) + (idx + 1)) * (VMN0348HGRO2 + 1) + idz + 1; int nj = (idx * (QAIWLRUW4W + 1) + (idy + 1)) * (VMN0348HGRO2 + 1) + idz + 1; out_ZXY[ni] = MCVBKLNFGEO7UDNK92034[i]; VHNOREH9384HRGUAIQ121[nj] = MCVBKLNFGEO7UDNK92034[i]; } } template<typename VNKJH5IORTN, typename VNJEG9485BNIJLIHNW3828934> __global__ void CVNOU3H4409IJIFEASD12(VNKJH5IORTN* in, VNJEG9485BNIJLIHNW3828934* out, int N, int VMN0348HGRO2) { int zi = threadIdx.x + blockIdx.x * blockDim.x; if(zi < VMN0348HGRO2) { out[zi] = in[zi]; for(int i = 1;i<N;++i) { out[i * VMN0348HGRO2 + zi] = out[(i - 1) * VMN0348HGRO2 + zi] + in[i * VMN0348HGRO2 + zi]; } } } template<typename VNKJH5IORTN, typename VNJEG9485BNIJLIHNW3828934> __global__ void DSKLVN083Q4HIRKSKDLHNF89WHEF(VNKJH5IORTN* in, VNJEG9485BNIJLIHNW3828934* out, int N, int VMN0348HGRO2) { int xyi = threadIdx.x + blockIdx.x * blockDim.x; if(xyi < N) { out[xyi * VMN0348HGRO2] = in[xyi * VMN0348HGRO2]; for(int ii = 1; ii < VMN0348HGRO2; ++ii) { out[xyi * VMN0348HGRO2 + ii] = out[xyi * VMN0348HGRO2 + ii - 1] + in[xyi * VMN0348HGRO2 + ii]; } } } template<> __global__ void DSKLVN083Q4HIRKSKDLHNF89WHEF(double* in, int2* out, int N, int VMN0348HGRO2) 
{ int xyi = threadIdx.x + blockIdx.x * blockDim.x; if(xyi < N) { double temp = in[xyi * VMN0348HGRO2]; out[xyi * VMN0348HGRO2] = make_int2(__double2loint(temp),__double2hiint(temp)); double temp2 = 0; for(int ii = 1; ii < VMN0348HGRO2; ++ii) { temp2 = temp + in[xyi * VMN0348HGRO2 + ii]; out[xyi * VMN0348HGRO2 + ii] = make_int2(__double2loint(temp2),__double2hiint(temp2)); temp = temp2; } } } template<typename T> __global__ void VNLJBN304IORNFK9023JOVKSLX(T* _____XCV, int VMN0348HGRO2, int N) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < N) { int CSVNKLDNFGDNFQ = idx * VMN0348HGRO2; for(int ii = 1; ii < VMN0348HGRO2; ++ii) { _____XCV[CSVNKLDNFGDNFQ + ii] = _____XCV[CSVNKLDNFGDNFQ + ii] + _____XCV[CSVNKLDNFGDNFQ + ii - 1]; } } } __global__ void VBDKL9304URGJFIDKAS12(float* _____XCV, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, int ZXMCKLNAESD2038EIWONVF) { int idv = threadIdx.x + blockIdx.x * blockDim.x; int pIdx = threadIdx.y + blockIdx.y * blockDim.y; if (idv < ASD23MZNCDS23890I && pIdx < ZXMCKLNAESD2038EIWONVF) { int ZXCVBNJASDFWasDF = pIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS * ASD23MZNCDS23890I + idv; for(int ii = 1; ii < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS; ++ii) { _____XCV[ZXCVBNJASDFWasDF + ii * ASD23MZNCDS23890I] = _____XCV[ZXCVBNJASDFWasDF + ii * ASD23MZNCDS23890I] + _____XCV[ZXCVBNJASDFWasDF + (ii - 1) * ASD23MZNCDS23890I]; } } } void ______XCNV9340KL9340KFDL(float* hVNNR03ASDQ234RA___, thrust::device_vector<float>& ZXY, thrust::device_vector<float>& ZYX, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2) { const int siz = CMNOI4BH3958BTUJ23 * QAIWLRUW4W * VMN0348HGRO2; const int nsiz_ZXY = (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1) * QAIWLRUW4W; const int nsiz_ZYX = (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1) * CMNOI4BH3958BTUJ23; ZXY.resize(nsiz_ZXY); ZYX.resize(nsiz_ZYX); thrust::device_vector<float> VNNR03ASDQ234RA___(hVNNR03ASDQ234RA___,hVNNR03ASDQ234RA___ + siz); dim3 KLCDNFKVG038Q4OHINGF34(64,16,1); dim3 
KSLJNV830Q49EZSDKLNP2Q3( (VMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x, (CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y, (QAIWLRUW4W + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z); hipLaunchKernelGGL(( NKCMVN934Q0UFK9340Q345), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, thrust::raw_pointer_cast(&VNNR03ASDQ234RA___[0]), thrust::raw_pointer_cast(&ZXY[0]), thrust::raw_pointer_cast(&ZYX[0]), CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2); VNNR03ASDQ234RA___.clear(); const int nVMN0348HGRO2 = VMN0348HGRO2 + 1; const int nCMNOI4BH3958BTUJ23 = CMNOI4BH3958BTUJ23 + 1; const int nQAIWLRUW4W = QAIWLRUW4W + 1; KLCDNFKVG038Q4OHINGF34.x = 32; KLCDNFKVG038Q4OHINGF34.y = 1; KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (nCMNOI4BH3958BTUJ23 * QAIWLRUW4W + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = 1; KSLJNV830Q49EZSDKLNP2Q3.z = 1;hipLaunchKernelGGL(( VNLJBN304IORNFK9023JOVKSLX), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, thrust::raw_pointer_cast(&ZXY[0]), nVMN0348HGRO2, nCMNOI4BH3958BTUJ23 * QAIWLRUW4W); KLCDNFKVG038Q4OHINGF34.x = 64; KLCDNFKVG038Q4OHINGF34.y = 16; KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (nVMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = (QAIWLRUW4W + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y; KSLJNV830Q49EZSDKLNP2Q3.z = 1;hipLaunchKernelGGL(( VBDKL9304URGJFIDKAS12), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, thrust::raw_pointer_cast(&ZXY[0]), nCMNOI4BH3958BTUJ23, nVMN0348HGRO2, QAIWLRUW4W); KLCDNFKVG038Q4OHINGF34.x = 32; KLCDNFKVG038Q4OHINGF34.y = 1; KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (nQAIWLRUW4W * CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = 1; KSLJNV830Q49EZSDKLNP2Q3.z = 1;hipLaunchKernelGGL(( 
VNLJBN304IORNFK9023JOVKSLX), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, thrust::raw_pointer_cast(&ZYX[0]), nVMN0348HGRO2, nQAIWLRUW4W * CMNOI4BH3958BTUJ23); KLCDNFKVG038Q4OHINGF34.x = 64; KLCDNFKVG038Q4OHINGF34.y = 16; KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (nVMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = (CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y; KSLJNV830Q49EZSDKLNP2Q3.z = 1;hipLaunchKernelGGL(( VBDKL9304URGJFIDKAS12), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, thrust::raw_pointer_cast(&ZYX[0]), nQAIWLRUW4W, nVMN0348HGRO2, CMNOI4BH3958BTUJ23); } template<typename T> void XCNVXCVA4ERHN3840EIOGNKFNASDFLJ( hipTextureObject_t& texObj, hipArray* d______XCVArray, int VNIRONV84WOHIDNSASDKL8934, int QWEIOQUWOISDKLJ23, int BCVMNREJDFK42W35, T* SDKLHFW9024U3TRJPIF, hipMemcpyKind VW34, hipTextureAddressMode addressMode, hipTextureFilterMode textureFilterMode, hipTextureReadMode textureReadMode, bool isNormalized) { hipExtent _____XCVSize; _____XCVSize.width = VNIRONV84WOHIDNSASDKL8934; _____XCVSize.height = QWEIOQUWOISDKLJ23; _____XCVSize.depth = BCVMNREJDFK42W35; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<T>(); hipMalloc3DArray(&d______XCVArray, &channelDesc, _____XCVSize); hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr( (void*)SDKLHFW9024U3TRJPIF, _____XCVSize.width * sizeof(T), _____XCVSize.width, _____XCVSize.height); copyParams.dstArray = d______XCVArray; copyParams.extent = _____XCVSize; copyParams.kind = VW34; hipMemcpy3D(&copyParams); hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = d______XCVArray; hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = addressMode; texDesc.addressMode[1] = addressMode; texDesc.addressMode[2] = addressMode; texDesc.filterMode = 
textureFilterMode; texDesc.readMode = textureReadMode; texDesc.normalizedCoords = isNormalized; CUDA_SAFE_CALL(hipCreateTextureObject(&texObj, &resDesc, &texDesc,nullptr)); } void VNSDKJFN0834HSDFHKJSDHF(hipTextureObject_t& texObj, hipArray* d_array) { hipDestroyTextureObject(texObj); hipFreeArray(d_array); } __global__ void ________A( hipTextureObject_t VNNR03ASDQ234RA___Tex1, hipTextureObject_t VNNR03ASDQ234RA___Tex2, double* proj, double3 s, const double3* __restrict cossinZT, const double* __restrict xds, const double* __restrict yds, const double* __restrict zds, const double* __restrict bxds, const double* __restrict byds, const double* __restrict bzds, double3 objCntIdx, double dx, double dz, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, int ZXMCKLNAESD2038EIWONVF) { int detIdV = threadIdx.x + blockIdx.x * blockDim.x; int detIdU = threadIdx.y + blockIdx.y * blockDim.y; int angIdx = threadIdx.z + blockIdx.z * blockDim.z; __shared__ double _xds[KLZSDHNQ04JAIO]; __shared__ double _yds[KLZSDHNQ04JAIO]; _xds[threadIdx.y] = xds[detIdU]; _yds[threadIdx.y] = yds[detIdU]; __syncthreads(); if(detIdU < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS && detIdV < ASD23MZNCDS23890I && angIdx < ZXMCKLNAESD2038EIWONVF) { double3 dir = cossinZT[angIdx]; double3 cursour = make_double3( s.x * dir.x - s.y * dir.y, s.x * dir.y + s.y * dir.x, s.z + dir.z); s = cossinZT[angIdx]; double summ = _xds[threadIdx.y] * s.x - _yds[threadIdx.y] * s.y; double obj = _xds[threadIdx.y] * s.y + _yds[threadIdx.y] * s.x; double realL = bxds[detIdU]; double realR = byds[detIdU]; double realU = bxds[detIdU + 1]; double realD = byds[detIdU + 1]; double2 curDetL = make_double2( realL * s.x - realR * s.y, realL * s.y + realR * s.x); double2 curDetR = make_double2( realU * s.x - realD * s.y, realU * s.y + realD * s.x); double4 curDet = make_double4(summ,obj,bzds[detIdV] + s.z,bzds[detIdV+1] + s.z); dir = normalize(make_double3( summ, obj, zds[detIdV] + s.z) - 
cursour); summ = 0; obj = 0; double intersectLength, intersectQWEIOQUWOISDKLJ23; double invdz = 1.0 / dz; double invdx = 1.0 / dx; double factL(1.0f); double factR(1.0f); double factU(1.0f); double factD(1.0f); double constVal = 0; int crealD, crealR, crealU, crealL; int frealD, frealR, frealU, frealL; if(abs(s.x) <= abs(s.y)) { summ = 0; factL = (curDetL.y - cursour.y) / (curDetL.x - cursour.x); factR = (curDetR.y - cursour.y) / (curDetR.x - cursour.x); factU = (curDet.w - cursour.z) / (curDet.x - cursour.x); factD = (curDet.z - cursour.z) / (curDet.x - cursour.x); constVal = dx * dx * dz / (abs(dir.x)); #pragma unroll for(int ii = 0; ii < CMNOI4BH3958BTUJ23; ii++) { obj = (ii - objCntIdx.x) * dx; realL = (obj - curDetL.x) * factL + curDetL.y; realR = (obj - curDetR.x) * factR + curDetR.y; realU = (obj - curDet.x) * factU + curDet.w; realD = (obj - curDet.x) * factD + curDet.z; intersectLength = realR - realL; intersectQWEIOQUWOISDKLJ23 = realU - realD; realD = realD * invdz + objCntIdx.z + 1; realR = realR * invdx + objCntIdx.y + 1; realU = realU * invdz + objCntIdx.z + 1; realL = realL * invdx + objCntIdx.y + 1; crealD = ceil(realD); crealR = ceil(realR); crealU = ceil(realU); crealL = ceil(realL); frealD = floor(realD); frealR = floor(realR); frealU = floor(realU); frealL = floor(realL); summ += (bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealD, frealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealD, crealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealD, frealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealD, crealL, ii + 0.5), realL - frealL,realD - frealD) + bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealU, frealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealU, crealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealU, frealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealU, crealR, ii + 0.5), realR - frealR,realU - frealU) - bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealD, frealR, ii + 0.5), 
tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealD, crealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealD, frealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealD, crealR, ii + 0.5), realR - frealR,realD - frealD) - bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealU, frealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealU, crealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealU, frealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealU, crealL, ii + 0.5), realL - frealL,realU - frealU))/ (intersectLength * intersectQWEIOQUWOISDKLJ23); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * constVal; } else { summ = 0; factL = (curDetL.x - cursour.x) / (curDetL.y - cursour.y); factR = (curDetR.x - cursour.x) / (curDetR.y - cursour.y); factU = (curDet.w - cursour.z) / (curDet.y - cursour.y); factD = (curDet.z - cursour.z) / (curDet.y - cursour.y); constVal = dx * dx * dz / (abs(dir.y)); #pragma unroll for(int jj = 0; jj < QAIWLRUW4W; jj++) { obj = (jj - objCntIdx.y) * dx; realL = (obj - curDetL.y) * factL + curDetL.x; realR = (obj - curDetR.y) * factR + curDetR.x; realU = (obj - curDet.y) * factU + curDet.w; realD = (obj - curDet.y) * factD + curDet.z; intersectLength = realR - realL; intersectQWEIOQUWOISDKLJ23 = realU - realD; realD = realD * invdz + objCntIdx.z + 1; realR = realR * invdx + objCntIdx.x + 1; realU = realU * invdz + objCntIdx.z + 1; realL = realL * invdx + objCntIdx.x + 1; crealD = ceil(realD); crealR = ceil(realR); crealU = ceil(realU); crealL = ceil(realL); frealD = floor(realD); frealR = floor(realR); frealU = floor(realU); frealL = floor(realL); summ += (bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealD, frealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealD, crealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealD, frealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealD, crealL, jj + 0.5), realL - frealL,realD - frealD) + bilerp( 
tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealU, frealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealU, crealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealU, frealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealU, crealR, jj + 0.5), realR - frealR,realU - frealU) - bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealD, frealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealD, crealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealD, frealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealD, crealR, jj + 0.5), realR - frealR,realD - frealD) - bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealU, frealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealU, crealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealU, frealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealU, crealL, jj + 0.5), realL - frealL,realU - frealU)) / (intersectLength * intersectQWEIOQUWOISDKLJ23); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * constVal; } } } void DD3_gpu_proj_doubleprecisionbranchless( float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* VNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum) { for(int ii = 0; ii != CMNOI4BH3958BTUJ23 * QAIWLRUW4W; ++ii) { byte v = mask[ii]; for(int jj = 0; jj != VMN0348HGRO2; ++jj) { VNNR03ASDQ234RA___[ii * VMN0348HGRO2 + jj] = VNNR03ASDQ234RA___[ii * VMN0348HGRO2 + jj] * v; } } float* bxds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* byds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* bzds = new float[ASD23MZNCDS23890I + 1]; DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, xds, bxds); DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, 
yds, byds); DD3Boundaries(ASD23MZNCDS23890I + 1, zds, bzds); CUDA_SAFE_CALL(hipSetDevice(gpunum)); CUDA_SAFE_CALL(hipDeviceReset()); hipStream_t streams[4]; CUDA_SAFE_CALL(hipStreamCreate(&streams[0])); CUDA_SAFE_CALL(hipStreamCreate(&streams[1])); CUDA_SAFE_CALL(hipStreamCreate(&streams[2])); CUDA_SAFE_CALL(hipStreamCreate(&streams[3])); int TOTVN = CMNOI4BH3958BTUJ23 * QAIWLRUW4W * VMN0348HGRO2; double objCntIdxX = (CMNOI4BH3958BTUJ23 - 1.0) * 0.5 - imgXCenter / dx; double objCntIdxY = (QAIWLRUW4W - 1.0) * 0.5 - imgYCenter / dx; double objCntIdxZ = (VMN0348HGRO2 - 1.0) * 0.5 - imgZCenter / dz; thrust::device_vector<float> in(VNNR03ASDQ234RA___, VNNR03ASDQ234RA___ + TOTVN); thrust::device_vector<double> MCVBKLNFGEO7UDNK92034((VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1) * QAIWLRUW4W, 0); thrust::device_vector<double> in_ZYX((VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1) * CMNOI4BH3958BTUJ23, 0); dim3 KLCDNFKVG038Q4OHINGF34(64,16,1); dim3 KSLJNV830Q49EZSDKLNP2Q3( (VMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x, (CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y, (QAIWLRUW4W + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z); hipLaunchKernelGGL(( NKCMVN934Q0UFK9340Q345<float,double>), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, thrust::raw_pointer_cast(&in[0]), thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034[0]), thrust::raw_pointer_cast(&in_ZYX[0]), CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2); in.clear(); thrust::device_vector<double> MCVBKLNFGEO7UDNK92034_summ1((VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1) * QAIWLRUW4W,0); thrust::device_vector<int2> MCVBKLNFGEO7UDNK92034_summ((VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1) * QAIWLRUW4W); KLCDNFKVG038Q4OHINGF34.x = 64;KLCDNFKVG038Q4OHINGF34.y = 1;KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (VMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x) / KLCDNFKVG038Q4OHINGF34.x;KSLJNV830Q49EZSDKLNP2Q3.y = 1;KSLJNV830Q49EZSDKLNP2Q3.z = 1; dim3 
KLCDNFKVG038Q4OHINGF342(64); dim3 KSLJNV830Q49EZSDKLNP2Q32((QAIWLRUW4W + KLCDNFKVG038Q4OHINGF342.x) / KLCDNFKVG038Q4OHINGF342.x); dim3 KLCDNFKVG038Q4OHINGF343(64); dim3 KSLJNV830Q49EZSDKLNP2Q33((CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF343.x) / KLCDNFKVG038Q4OHINGF343.x); for(int jj = 0; jj != QAIWLRUW4W; ++jj) {hipLaunchKernelGGL(( CVNOU3H4409IJIFEASD12), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34),0,streams[0], thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034[0]) + jj * (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1), thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034_summ1[0]) + jj * (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1), CMNOI4BH3958BTUJ23 + 1, VMN0348HGRO2 + 1);hipLaunchKernelGGL(( DSKLVN083Q4HIRKSKDLHNF89WHEF), dim3(KSLJNV830Q49EZSDKLNP2Q32),dim3(KLCDNFKVG038Q4OHINGF342),0,streams[0], thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034_summ1[0]) + jj * (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1), thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034_summ[0]) + jj * (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1), CMNOI4BH3958BTUJ23 + 1, VMN0348HGRO2 + 1); } MCVBKLNFGEO7UDNK92034.clear(); MCVBKLNFGEO7UDNK92034_summ1.clear(); hipArray* d_VNNR03ASDQ234RA___umeArray1 = nullptr; hipTextureObject_t texObj1; XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<int2>(texObj1, d_VNNR03ASDQ234RA___umeArray1,VMN0348HGRO2 + 1, CMNOI4BH3958BTUJ23+1,QAIWLRUW4W, thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034_summ[0]), hipMemcpyDeviceToDevice, hipAddressModeClamp,hipFilterModePoint, hipReadModeElementType,false); MCVBKLNFGEO7UDNK92034_summ.clear(); thrust::device_vector<double> in_ZYX_summ1((VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1) * CMNOI4BH3958BTUJ23, 0); thrust::device_vector<int2> in_ZYX_summ((VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1) * CMNOI4BH3958BTUJ23); for(int ii = 0; ii != CMNOI4BH3958BTUJ23; ++ii) { hipLaunchKernelGGL(( CVNOU3H4409IJIFEASD12), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34),0,streams[1], thrust::raw_pointer_cast(&in_ZYX[0]) + ii * (VMN0348HGRO2 + 1) * 
(QAIWLRUW4W + 1), thrust::raw_pointer_cast(&in_ZYX_summ1[0]) + ii * (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1), QAIWLRUW4W + 1, VMN0348HGRO2 + 1);hipLaunchKernelGGL(( DSKLVN083Q4HIRKSKDLHNF89WHEF), dim3(KSLJNV830Q49EZSDKLNP2Q33),dim3(KLCDNFKVG038Q4OHINGF343),0,streams[1], thrust::raw_pointer_cast(&in_ZYX_summ1[0]) + ii * (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1), thrust::raw_pointer_cast(&in_ZYX_summ[0]) + ii * (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1), QAIWLRUW4W + 1, VMN0348HGRO2 + 1); } in_ZYX.clear(); in_ZYX_summ1.clear(); hipArray* d_VNNR03ASDQ234RA___umeArray2 = nullptr; hipTextureObject_t texObj2; XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<int2>(texObj2, d_VNNR03ASDQ234RA___umeArray2,VMN0348HGRO2 + 1, QAIWLRUW4W+1,CMNOI4BH3958BTUJ23, thrust::raw_pointer_cast(&in_ZYX_summ[0]), hipMemcpyDeviceToDevice, hipAddressModeClamp,hipFilterModePoint, hipReadModeElementType,false); in_ZYX_summ.clear(); thrust::device_vector<double> _____XCV(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS * ASD23MZNCDS23890I * ZXMCKLNAESD2038EIWONVF, 0); thrust::device_vector<double> angs(hangs, hangs + ZXMCKLNAESD2038EIWONVF); thrust::device_vector<double> zPos(hzPos, hzPos + ZXMCKLNAESD2038EIWONVF); thrust::device_vector<double> d_xds(xds, xds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::device_vector<double> d_yds(yds, yds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::device_vector<double> d_zds(zds, zds + ASD23MZNCDS23890I); thrust::device_vector<double> d_bxds(bxds, bxds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1); thrust::device_vector<double> d_byds(byds, byds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1); thrust::device_vector<double> d_bzds(bzds, bzds + ASD23MZNCDS23890I + 1); thrust::device_vector<double3> cossinZT(ZXMCKLNAESD2038EIWONVF); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(angs.begin(),zPos.begin())), thrust::make_zip_iterator(thrust::make_tuple(angs.end(),zPos.end())), cossinZT.begin(),CTMBIR::ConstantForBackProjection<double>(x0,y0,z0)); dim3 KLCDNFKVG038Q4OHINGF34c(64,16,1); dim3 
KSLJNV830Q49EZSDKLNP2Q3c( (ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34c.x) / KLCDNFKVG038Q4OHINGF34c.x, (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34c.y) / KLCDNFKVG038Q4OHINGF34c.y, (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34c.z - 1) / KLCDNFKVG038Q4OHINGF34c.z); KLCDNFKVG038Q4OHINGF34.x = VAMDSN3409UOJIFK; KLCDNFKVG038Q4OHINGF34.y = KLZSDHNQ04JAIO; KLCDNFKVG038Q4OHINGF34.z = SKLJDFNGHW9835GJK35TG; KSLJNV830Q49EZSDKLNP2Q3.x = (ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y; KSLJNV830Q49EZSDKLNP2Q3.z = (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z; hipLaunchKernelGGL(( ________A), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, texObj1,texObj2, thrust::raw_pointer_cast(&_____XCV[0]),make_double3(x0,y0,z0), thrust::raw_pointer_cast(&cossinZT[0]), thrust::raw_pointer_cast(&d_xds[0]), thrust::raw_pointer_cast(&d_yds[0]), thrust::raw_pointer_cast(&d_zds[0]), thrust::raw_pointer_cast(&d_bxds[0]), thrust::raw_pointer_cast(&d_byds[0]), thrust::raw_pointer_cast(&d_bzds[0]), make_double3(objCntIdxX, objCntIdxY,objCntIdxZ), dx, dz, CMNOI4BH3958BTUJ23, QAIWLRUW4W, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, ZXMCKLNAESD2038EIWONVF); thrust::copy(_____XCV.begin(),_____XCV.end(),h_____XCV); CUDA_SAFE_CALL(hipDestroyTextureObject(texObj1)); CUDA_SAFE_CALL(hipDestroyTextureObject(texObj2)); VNSDKJFN0834HSDFHKJSDHF(texObj1, d_VNNR03ASDQ234RA___umeArray1); VNSDKJFN0834HSDFHKJSDHF(texObj2, d_VNNR03ASDQ234RA___umeArray2); _____XCV.clear(); angs.clear(); zPos.clear(); d_xds.clear(); d_yds.clear(); d_zds.clear(); d_bxds.clear(); d_byds.clear(); d_bzds.clear(); delete[] bxds; delete[] byds; delete[] bzds; } __global__ void ZCNKLXDVN3084ORHINJK9Q304UTIGNFK( hipTextureObject_t VNNR03ASDQ234RA___Tex, float* proj, float3 s, float* d_xds, float* d_yds, float* 
d_zds, float3* cossinT, float3 objCntIdx, float dx, float dz, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, int ZXMCKLNAESD2038EIWONVF) { int detIdV = threadIdx.x + blockIdx.x * blockDim.x; int detIdU = threadIdx.y + blockIdx.y * blockDim.y; int angIdx = threadIdx.z + blockIdx.z * blockDim.z; if(detIdV < ASD23MZNCDS23890I && detIdU < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS && angIdx < ZXMCKLNAESD2038EIWONVF) { float3 cossin = cossinT[angIdx]; float3 cursour = make_float3( s.x * cossin.x - s.y * cossin.y, s.x * cossin.y + s.y * cossin.x, s.z + cossin.z); float summ = d_xds[detIdU]; float obj = d_yds[detIdU]; float idx = d_zds[detIdV]; float3 curDet = make_float3( summ * cossin.x - obj * cossin.y, summ * cossin.y + obj * cossin.x, idx + cossin.z); float3 dir = normalize(curDet - cursour); summ = 0; obj = 0; float idxZ; if(fabsf(cossin.x) <= fabsf(cossin.y)) { summ = 0; for(int ii = 0; ii < CMNOI4BH3958BTUJ23; ++ii) { obj = (ii - objCntIdx.x) * dx; idx = (obj - curDet.x) / dir.x * dir.y + curDet.y; idxZ = (obj - curDet.x) / dir.x * dir.z + curDet.z; idx = idx / dx + objCntIdx.y + 0.5; idxZ = idxZ / dz + objCntIdx.z + 0.5; summ += tex3D<float>(VNNR03ASDQ234RA___Tex, idxZ, ii + 0.5f, idx); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * dx / fabsf(dir.x); } else { summ = 0; for(int jj = 0; jj < QAIWLRUW4W; ++jj) { obj = (jj - objCntIdx.y) * dx; idx = (obj - curDet.y) / dir.y * dir.x + curDet.x; idxZ = (obj - curDet.y) / dir.y * dir.z + curDet.z; idx = idx / dx + objCntIdx.x + 0.5; idxZ = idxZ / dz + objCntIdx.z + 0.5; summ += tex3D<float>(VNNR03ASDQ234RA___Tex, idxZ, idx, jj + 0.5f); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * dx / fabsf(dir.y); } } } void ______XCKMVN840WNJK9Q02HJIFKS( float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* 
xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* hVNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum) { for(int ii = 0; ii != CMNOI4BH3958BTUJ23 * QAIWLRUW4W; ++ii) { byte v = mask[ii]; for(int jj = 0; jj != VMN0348HGRO2; ++jj) { hVNNR03ASDQ234RA___[ii * VMN0348HGRO2 + jj] = hVNNR03ASDQ234RA___[ii * VMN0348HGRO2 + jj] * v; } } float* bxds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* byds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* bzds = new float[ASD23MZNCDS23890I + 1]; DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, xds, bxds); DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, yds, byds); DD3Boundaries(ASD23MZNCDS23890I + 1, zds, bzds); CUDA_SAFE_CALL(hipSetDevice(gpunum)); CUDA_SAFE_CALL(hipDeviceReset()); const int TOTVN = CMNOI4BH3958BTUJ23 * QAIWLRUW4W * VMN0348HGRO2; float objCntIdxX = (CMNOI4BH3958BTUJ23 - 1.0) * 0.5 - imgXCenter / dx; float objCntIdxY = (QAIWLRUW4W - 1.0) * 0.5 - imgYCenter / dx; float objCntIdxZ = (VMN0348HGRO2 - 1.0) * 0.5 - imgZCenter / dz; d_vec_t VNNR03ASDQ234RA___(hVNNR03ASDQ234RA___, hVNNR03ASDQ234RA___ + TOTVN); hipTextureObject_t texObj; hipArray* d_VNNR03ASDQ234RA___umeArray = nullptr; XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<float>(texObj,d_VNNR03ASDQ234RA___umeArray, VMN0348HGRO2,CMNOI4BH3958BTUJ23,QAIWLRUW4W, thrust::raw_pointer_cast(&VNNR03ASDQ234RA___[0]), hipMemcpyDeviceToDevice, hipAddressModeBorder, hipFilterModeLinear, hipReadModeElementType,false); d_vec_t _____XCV(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS * ASD23MZNCDS23890I * ZXMCKLNAESD2038EIWONVF, 0); d_vec_t angs(hangs,hangs + ZXMCKLNAESD2038EIWONVF); d_vec_t zPos(hzPos, hzPos + ZXMCKLNAESD2038EIWONVF); d_vec_t d_xds(xds, xds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); d_vec_t d_yds(yds, yds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); d_vec_t d_zds(zds, zds + ASD23MZNCDS23890I); 
thrust::device_vector<float3> cossinZT(ZXMCKLNAESD2038EIWONVF); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(angs.begin(), zPos.begin())), thrust::make_zip_iterator(thrust::make_tuple(angs.end(), zPos.end())), cossinZT.begin(),CTMBIR::ConstantForBackProjection<float>(x0,y0,z0)); dim3 KLCDNFKVG038Q4OHINGF34(64,16,1); dim3 KSLJNV830Q49EZSDKLNP2Q3((ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x, (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y, (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z); hipLaunchKernelGGL(( ZCNKLXDVN3084ORHINJK9Q304UTIGNFK), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, texObj,thrust::raw_pointer_cast(&_____XCV[0]), make_float3(x0,y0,z0), thrust::raw_pointer_cast(&d_xds[0]), thrust::raw_pointer_cast(&d_yds[0]), thrust::raw_pointer_cast(&d_zds[0]), thrust::raw_pointer_cast(&cossinZT[0]), make_float3(objCntIdxX,objCntIdxY,objCntIdxZ), dx, dz, CMNOI4BH3958BTUJ23, QAIWLRUW4W, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, ZXMCKLNAESD2038EIWONVF); thrust::copy(_____XCV.begin(),_____XCV.end(),h_____XCV); VNSDKJFN0834HSDFHKJSDHF(texObj,d_VNNR03ASDQ234RA___umeArray); _____XCV.clear(); angs.clear(); zPos.clear(); d_xds.clear(); d_yds.clear(); d_zds.clear(); cossinZT.clear(); delete[] bxds; delete[] byds; delete[] bzds; } __global__ void ______OPRGDMFKLBQ93ZXC893457( hipTextureObject_t VNNR03ASDQ234RA___Tex1, hipTextureObject_t VNNR03ASDQ234RA___Tex2, float* proj, float3 s, const float3* __restrict cossinZT, const float* __restrict xds, const float* __restrict yds, const float* __restrict zds, const float* __restrict bxds, const float* __restrict byds, const float* __restrict bzds, float3 objCntIdx, float dx, float dz, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, int ZXMCKLNAESD2038EIWONVF) { int detIdV = threadIdx.x + blockIdx.x * blockDim.x; int 
detIdU = threadIdx.y + blockIdx.y * blockDim.y; int angIdx = threadIdx.z + blockIdx.z * blockDim.z; __shared__ float _xds[KLZSDHNQ04JAIO]; __shared__ float _yds[KLZSDHNQ04JAIO]; _xds[threadIdx.y] = xds[detIdU]; _yds[threadIdx.y] = yds[detIdU]; __syncthreads(); if(detIdU < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS && detIdV < ASD23MZNCDS23890I && angIdx < ZXMCKLNAESD2038EIWONVF) { float3 dir = cossinZT[angIdx]; float3 cursour = make_float3( s.x * dir.x - s.y * dir.y, s.x * dir.y + s.y * dir.x, s.z + dir.z); s = cossinZT[angIdx]; float summ = _xds[threadIdx.y] * s.x - _yds[threadIdx.y] * s.y; float obj = _xds[threadIdx.y] * s.y + _yds[threadIdx.y] * s.x; float realL = bxds[detIdU]; float realR = byds[detIdU]; float realU = bxds[detIdU + 1]; float realD = byds[detIdU + 1]; float2 curDetL = make_float2( realL * s.x - realR * s.y, realL * s.y + realR * s.x); float2 curDetR = make_float2( realU * s.x - realD * s.y, realU * s.y + realD * s.x); float4 curDet = make_float4(summ,obj,bzds[detIdV] + s.z,bzds[detIdV+1] + s.z); dir = normalize(make_float3( summ, obj, zds[detIdV] + s.z) - cursour); summ = 0; obj = 0; float intersectLength, intersectQWEIOQUWOISDKLJ23; float invdz = 1.0 / dz; float invdx = 1.0 / dx; float factL(1.0f); float factR(1.0f); float factU(1.0f); float factD(1.0f); float constVal = 0; if(fabsf(s.x) <= fabsf(s.y)) { summ = 0; factL = (curDetL.y - cursour.y) / (curDetL.x - cursour.x); factR = (curDetR.y - cursour.y) / (curDetR.x - cursour.x); factU = (curDet.w - cursour.z) / (curDet.x - cursour.x); factD = (curDet.z - cursour.z) / (curDet.x - cursour.x); constVal = dx * dx * dz / (fabsf(dir.x)); #pragma unroll for(int ii = 0; ii < CMNOI4BH3958BTUJ23; ii++) { obj = (ii - objCntIdx.x) * dx; realL = (obj - curDetL.x) * factL + curDetL.y; realR = (obj - curDetR.x) * factR + curDetR.y; realU = (obj - curDet.x) * factU + curDet.w; realD = (obj - curDet.x) * factD + curDet.z; intersectLength = realR - realL; intersectQWEIOQUWOISDKLJ23 = realU - realD; realD = realD * invdz 
+ objCntIdx.z + 1; realR = realR * invdx + objCntIdx.y + 1; realU = realU * invdz + objCntIdx.z + 1; realL = realL * invdx + objCntIdx.y + 1; summ += (tex3D<float>(VNNR03ASDQ234RA___Tex2, realD, realL, ii + 0.5f) + tex3D<float>(VNNR03ASDQ234RA___Tex2, realU, realR, ii + 0.5f) - (tex3D<float>(VNNR03ASDQ234RA___Tex2, realD, realR, ii + 0.5f) + tex3D<float>(VNNR03ASDQ234RA___Tex2, realU, realL, ii + 0.5f)) ) / (intersectLength * intersectQWEIOQUWOISDKLJ23); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * constVal; } else { summ = 0; factL = (curDetL.x - cursour.x) / (curDetL.y - cursour.y); factR = (curDetR.x - cursour.x) / (curDetR.y - cursour.y); factU = (curDet.w - cursour.z) / (curDet.y - cursour.y); factD = (curDet.z - cursour.z) / (curDet.y - cursour.y); constVal = dx * dx * dz / (fabsf(dir.y)); #pragma unroll for(int jj = 0; jj < QAIWLRUW4W; jj++) { obj = (jj - objCntIdx.y) * dx; realL = (obj - curDetL.y) * factL + curDetL.x; realR = (obj - curDetR.y) * factR + curDetR.x; realU = (obj - curDet.y) * factU + curDet.w; realD = (obj - curDet.y) * factD + curDet.z; intersectLength = realR - realL; intersectQWEIOQUWOISDKLJ23 = realU - realD; realD = realD * invdz + objCntIdx.z + 1; realR = realR * invdx + objCntIdx.x + 1; realU = realU * invdz + objCntIdx.z + 1; realL = realL * invdx + objCntIdx.x + 1; summ += (tex3D<float>(VNNR03ASDQ234RA___Tex1, realD,realL, jj + 0.5f) + tex3D<float>(VNNR03ASDQ234RA___Tex1, realU, realR, jj + 0.5f) - (tex3D<float>(VNNR03ASDQ234RA___Tex1, realD, realR, jj + 0.5f)+ tex3D<float>(VNNR03ASDQ234RA___Tex1, realU, realL, jj + 0.5f)) )/ (intersectLength * intersectQWEIOQUWOISDKLJ23); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * constVal; } } } void DD3_gpu_proj_branchless_sat2d( float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, 
float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* VNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum) { for(int i = 0; i != CMNOI4BH3958BTUJ23 * QAIWLRUW4W; ++i) { byte v = mask[i]; for(int z = 0; z != VMN0348HGRO2; ++z) { VNNR03ASDQ234RA___[i * VMN0348HGRO2 + z] = VNNR03ASDQ234RA___[i * VMN0348HGRO2 + z] * v; } } CUDA_SAFE_CALL(hipSetDevice(gpunum)); CUDA_SAFE_CALL(hipDeviceReset()); float* bxds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* byds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* bzds = new float[ASD23MZNCDS23890I + 1]; DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, xds, bxds); DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, yds, byds); DD3Boundaries(ASD23MZNCDS23890I + 1, zds, bzds); hipStream_t streams[4]; CUDA_SAFE_CALL(hipStreamCreate(&streams[0])); CUDA_SAFE_CALL(hipStreamCreate(&streams[1])); CUDA_SAFE_CALL(hipStreamCreate(&streams[2])); CUDA_SAFE_CALL(hipStreamCreate(&streams[3])); float objCntIdxX = (CMNOI4BH3958BTUJ23 - 1.0) * 0.5 - imgXCenter / dx; float objCntIdxY = (QAIWLRUW4W - 1.0) * 0.5 - imgYCenter / dx; float objCntIdxZ = (VMN0348HGRO2 - 1.0) * 0.5 - imgZCenter / dz; thrust::device_vector<float> SATZXY; thrust::device_vector<float> SATZYX; ______XCNV9340KL9340KFDL(VNNR03ASDQ234RA___,SATZXY,SATZYX, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2); hipTextureObject_t texObj1; hipArray* d_VNNR03ASDQ234RA___umeArray1 = nullptr; hipTextureObject_t texObj2; hipArray* d_VNNR03ASDQ234RA___umeArray2 = nullptr; XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<float>(texObj1,d_VNNR03ASDQ234RA___umeArray1, VMN0348HGRO2 + 1,CMNOI4BH3958BTUJ23 + 1,QAIWLRUW4W, thrust::raw_pointer_cast(&SATZXY[0]), hipMemcpyDeviceToDevice, hipAddressModeClamp, hipFilterModeLinear, hipReadModeElementType,false); SATZXY.clear(); XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<float>(texObj2,d_VNNR03ASDQ234RA___umeArray2, 
VMN0348HGRO2 + 1,QAIWLRUW4W + 1,CMNOI4BH3958BTUJ23, thrust::raw_pointer_cast(&SATZYX[0]), hipMemcpyDeviceToDevice, hipAddressModeClamp, hipFilterModeLinear, hipReadModeElementType,false); SATZYX.clear(); thrust::device_vector<float> _____XCV(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS * ASD23MZNCDS23890I * ZXMCKLNAESD2038EIWONVF, 0); thrust::device_vector<float> d_xds(xds, xds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::device_vector<float> d_yds(yds, yds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::device_vector<float> d_zds(zds, zds + ASD23MZNCDS23890I); thrust::device_vector<float> d_bxds(bxds, bxds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1); thrust::device_vector<float> d_byds(byds, byds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1); thrust::device_vector<float> d_bzds(bzds, bzds + ASD23MZNCDS23890I + 1); thrust::device_vector<float> angs(hangs, hangs + ZXMCKLNAESD2038EIWONVF); thrust::device_vector<float> zPos(hzPos, hzPos + ZXMCKLNAESD2038EIWONVF); thrust::device_vector<float3> cossinZT(ZXMCKLNAESD2038EIWONVF); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(angs.begin(), zPos.begin())), thrust::make_zip_iterator(thrust::make_tuple(angs.end(), zPos.end())), cossinZT.begin(),CTMBIR::ConstantForBackProjection<float>(x0,y0,z0)); dim3 KLCDNFKVG038Q4OHINGF34; dim3 KSLJNV830Q49EZSDKLNP2Q3; KLCDNFKVG038Q4OHINGF34.x = VAMDSN3409UOJIFK; KLCDNFKVG038Q4OHINGF34.y = KLZSDHNQ04JAIO; KLCDNFKVG038Q4OHINGF34.z = SKLJDFNGHW9835GJK35TG; KSLJNV830Q49EZSDKLNP2Q3.x = (ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y; KSLJNV830Q49EZSDKLNP2Q3.z = (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z; hipLaunchKernelGGL(( ______OPRGDMFKLBQ93ZXC893457), dim3(KSLJNV830Q49EZSDKLNP2Q3),dim3(KLCDNFKVG038Q4OHINGF34), 0, 0, texObj1,texObj2, thrust::raw_pointer_cast(&_____XCV[0]), make_float3(x0,y0,z0), 
thrust::raw_pointer_cast(&cossinZT[0]), thrust::raw_pointer_cast(&d_xds[0]), thrust::raw_pointer_cast(&d_yds[0]), thrust::raw_pointer_cast(&d_zds[0]), thrust::raw_pointer_cast(&d_bxds[0]), thrust::raw_pointer_cast(&d_byds[0]), thrust::raw_pointer_cast(&d_bzds[0]), make_float3(objCntIdxX, objCntIdxY,objCntIdxZ), dx, dz, CMNOI4BH3958BTUJ23, QAIWLRUW4W, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, ZXMCKLNAESD2038EIWONVF); thrust::copy(_____XCV.begin(),_____XCV.end(),h_____XCV); CUDA_SAFE_CALL(hipDestroyTextureObject(texObj1)); CUDA_SAFE_CALL(hipDestroyTextureObject(texObj2)); VNSDKJFN0834HSDFHKJSDHF(texObj1,d_VNNR03ASDQ234RA___umeArray1); VNSDKJFN0834HSDFHKJSDHF(texObj2,d_VNNR03ASDQ234RA___umeArray2); angs.clear(); zPos.clear(); _____XCV.clear(); d_xds.clear(); d_yds.clear(); d_zds.clear(); d_bxds.clear(); d_byds.clear(); d_bzds.clear(); cossinZT.clear(); delete[] bxds; delete[] byds; delete[] bzds; } template<typename T> void DD3Boundaries(int nrBoundaries, thrust::host_vector<T>& Centers, thrust::host_vector<T>& Boundaries) { int i; T* pBoundaries = &Boundaries[0]; T* pCenters = &Centers[0]; if (nrBoundaries >= 3) { *pBoundaries++ = 1.5 * *pCenters - 0.5 * *(pCenters + 1); for (i = 1; i <= (nrBoundaries - 2); i++) { *pBoundaries++ = 0.5 * *pCenters + 0.5 * *(pCenters + 1); pCenters++; } *pBoundaries = 1.5 * *pCenters - 0.5 * *(pCenters - 1); } else { *pBoundaries = *pCenters - 0.5; *(pBoundaries + 1) = *pCenters + 0.5; } } __global__ void DD3Proj_branches_ker(float* proj, const float* VNNR03ASDQ234RA___, float x0, float y0, float z0, const float* xds, const float* yds, const float* bxds, const float* byds, const float* bzds, float3* cossinZT, float objCntIdxX, float objCntIdxY, float objCntIdxZ, const int CMNOI4BH3958BTUJ23, const int QAIWLRUW4W, const int VMN0348HGRO2, const int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, const int ASD23MZNCDS23890I, const int ZXMCKLNAESD2038EIWONVF, float dx, float dz) { const int detIdV = threadIdx.x + blockIdx.x * blockDim.x; 
const int detIdU = threadIdx.y + blockIdx.y * blockDim.y; const int angIdx = threadIdx.z + blockIdx.z * blockDim.z; if (detIdU < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS && detIdV < ASD23MZNCDS23890I && angIdx < ZXMCKLNAESD2038EIWONVF) { float cosT = cossinZT[angIdx].x; float sinT = cossinZT[angIdx].y; float zPos = cossinZT[angIdx].z; float3 cursour = make_float3(x0 * cosT - y0 * sinT, x0 * sinT + y0 * cosT, z0 + zPos); float summ = 0; float detPosLX = bxds[detIdU]; float detPosRX = bxds[detIdU + 1]; float detPosLY = byds[detIdU]; float detPosRY = byds[detIdU + 1]; float detPosDZ = bzds[detIdV]; float detPosUZ = detPosDZ + dz; float curDirLX = detPosLX * cosT - detPosLY * sinT - cursour.x; float curDirLY = detPosLX * sinT + detPosLY * cosT - cursour.y; float curDirRX = detPosRX * cosT - detPosRY * sinT - cursour.x; float curDirRY = detPosRX * sinT + detPosRY * cosT - cursour.y; float curDirDZ = detPosDZ + zPos - cursour.z; float curDirUZ = detPosUZ + zPos - cursour.z; float dirX = xds[detIdU] * cosT - yds[detIdU] * sinT - cursour.x; float dirY = xds[detIdU] * sinT + yds[detIdU] * cosT - cursour.y; float dirZ = detPosDZ - 0.5 * dz - z0; if (fabsf(dirX) < fabsf(dirY)) { detPosLX = -cursour.y * curDirLX / curDirLY + cursour.x; detPosRX = -cursour.y * curDirRX / curDirRY + cursour.x; detPosDZ = -cursour.y * curDirDZ / dirY + cursour.z; detPosUZ = -cursour.y * curDirUZ / dirY + cursour.z; dirZ = dx / fabsf(dirY / sqrtf(dirY * dirY + dirX * dirX + dirZ * dirZ) * (detPosLX - detPosRX) * (detPosUZ - detPosDZ)); if (detPosLX > detPosRX) { zPos = detPosLX; detPosLX = detPosRX; detPosRX = zPos; } for (int jj = 0; jj < QAIWLRUW4W; jj++) { y0 = (jj - objCntIdxY) * dx - cursour.y; zPos = y0 / curDirLY; cosT = zPos * curDirLX + cursour.x; sinT = zPos * curDirRX + cursour.x; x0 = zPos * curDirDZ + cursour.z; z0 = zPos * curDirUZ + cursour.z; if (cosT > sinT) { zPos = cosT; cosT = sinT; sinT = zPos; } int minXIdx = floor(cosT / dx + objCntIdxX) - 1; int maxXIdx = ceil(sinT / dx + 
objCntIdxX) + 1; int minZIdx = floor(x0 / dz + objCntIdxZ) - 1; int maxZIdx = ceil(z0 / dz + objCntIdxZ) + 1; if (maxXIdx < 0) { continue; } if (minXIdx > CMNOI4BH3958BTUJ23) { continue; } if (maxZIdx < 0) { continue; } if (minZIdx > VMN0348HGRO2) { continue; } if (minXIdx < 0) { minXIdx = 0; } if (maxXIdx > CMNOI4BH3958BTUJ23) { maxXIdx = CMNOI4BH3958BTUJ23; } if (minZIdx < 0) { minZIdx = 0; } if (maxZIdx > VMN0348HGRO2) { maxZIdx = VMN0348HGRO2; } cosT = (cursour.x - (minXIdx - objCntIdxX) * dx) * cursour.y / y0 + cursour.x; for (int ii = minXIdx; ii < maxXIdx; ii++) { sinT = cosT - dx * cursour.y / y0; dirX = intersectLength_device<float>(detPosLX, detPosRX, cosT, sinT); x0 = (cursour.z - (minZIdx - objCntIdxZ) * dz) * cursour.y / y0 + cursour.z; for (int kk = minZIdx; kk < maxZIdx; kk++) { z0 = x0 - dz * cursour.y / y0; dirY = intersectLength_device<float>(detPosDZ, detPosUZ, x0, z0); summ += VNNR03ASDQ234RA___[(jj * CMNOI4BH3958BTUJ23 + ii) * VMN0348HGRO2 + kk] * (dirX * dirY); x0 = z0; } cosT = sinT; } } proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * dirZ; } else { detPosLY = -cursour.x * curDirLY / curDirLX + cursour.y; detPosRY = -cursour.x * curDirRY / curDirRX + cursour.y; detPosDZ = -cursour.x * curDirDZ / dirX + cursour.z; detPosUZ = -cursour.x * curDirUZ / dirX + cursour.z; dirZ = dx / fabsf(dirX / sqrtf(dirY * dirY + dirX * dirX + dirZ * dirZ) * (detPosLY - detPosRY) * (detPosUZ - detPosDZ)); if (detPosLY > detPosRY) { zPos = detPosLY; detPosLY = detPosRY; detPosRY = zPos; } for (int ii = 0; ii < CMNOI4BH3958BTUJ23; ii++) { x0 = (ii - objCntIdxX) * dx - cursour.x; zPos = x0 / curDirLX; cosT = zPos * curDirLY + cursour.y; sinT = zPos * curDirRY + cursour.y; y0 = zPos * curDirDZ + cursour.z; z0 = zPos * curDirUZ + cursour.z; if (cosT > sinT){ zPos = cosT; cosT = sinT; sinT = zPos; } int minYIdx = floor(cosT / dx + objCntIdxY) - 1; int maxYIdx = ceil(sinT / dx + objCntIdxY) + 1; int minZIdx = floor(y0 / 
dz + objCntIdxZ) - 1; int maxZIdx = ceil(z0 / dz + objCntIdxZ) + 1; if (maxYIdx < 0) { continue; } if (minYIdx > CMNOI4BH3958BTUJ23) { continue; } if (maxZIdx < 0) { continue; } if (minZIdx > VMN0348HGRO2) { continue; } if (minYIdx < 0) { minYIdx = 0; } if (maxYIdx > CMNOI4BH3958BTUJ23) { maxYIdx = QAIWLRUW4W; } if (minZIdx < 0) { minZIdx = 0; } if (maxZIdx > VMN0348HGRO2) { maxZIdx = VMN0348HGRO2; } cosT = (cursour.y - (minYIdx - objCntIdxY) * dx) * cursour.x / x0 + cursour.y; for (int jj = minYIdx; jj < maxYIdx; jj++) { sinT = cosT - dx * cursour.x / x0; dirX = intersectLength_device<float>(detPosLY, detPosRY, cosT, sinT); y0 = (cursour.z - (minZIdx - objCntIdxZ) * dz) * cursour.x / x0 + cursour.z; for (int kk = minZIdx; kk < maxZIdx; kk++) { z0 = y0 - dz * cursour.x / x0; dirY = intersectLength_device<float>(detPosDZ, detPosUZ, y0, z0); summ += VNNR03ASDQ234RA___[(jj * CMNOI4BH3958BTUJ23 + ii) * VMN0348HGRO2 + kk] * (dirX * dirY); y0 = z0; } cosT = sinT; } } proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * dirZ; } } } void DD3Proj_branches(float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* hVNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum) { hipSetDevice(gpunum); for (int jj = 0; jj != QAIWLRUW4W; ++jj) { for (int ii = 0; ii != CMNOI4BH3958BTUJ23; ++ii) { byte tempV = mask[jj * CMNOI4BH3958BTUJ23 + ii]; for (int kk = 0; kk != VMN0348HGRO2; ++kk) { hVNNR03ASDQ234RA___[(jj * CMNOI4BH3958BTUJ23 + ii) * VMN0348HGRO2 + kk] *= static_cast<float>(tempV); } } } thrust::host_vector<float3> hcossinZT(ZXMCKLNAESD2038EIWONVF); for (int i = 0; i != ZXMCKLNAESD2038EIWONVF; ++i) { hcossinZT[i].x = cosf(hangs[i]); hcossinZT[i].y = sinf(hangs[i]); 
hcossinZT[i].z = hzPos[i]; } thrust::device_vector<float3> cossinZT = hcossinZT; thrust::host_vector<float> hxds(xds, xds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::host_vector<float> hyds(yds, yds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::host_vector<float> hzds(zds, zds + ASD23MZNCDS23890I); thrust::host_vector<float> bxds(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, 0); thrust::host_vector<float> byds(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, 0); thrust::host_vector<float> bzds(ASD23MZNCDS23890I + 1, 0); DD3Boundaries<float>(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, hxds, bxds); DD3Boundaries<float>(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, hyds, byds); DD3Boundaries<float>(ASD23MZNCDS23890I + 1, hzds, bzds); thrust::device_vector<float> dxds = hxds; thrust::device_vector<float> dyds = hyds; thrust::device_vector<float> dzds = hzds; thrust::device_vector<float> dbxds = bxds; thrust::device_vector<float> dbyds = byds; thrust::device_vector<float> dbzds = bzds; thrust::device_vector<float> d_____XCV(h_____XCV, h_____XCV + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS*ASD23MZNCDS23890I*ZXMCKLNAESD2038EIWONVF); thrust::device_vector<float> dVNNR03ASDQ234RA___(hVNNR03ASDQ234RA___, hVNNR03ASDQ234RA___ + CMNOI4BH3958BTUJ23*QAIWLRUW4W*VMN0348HGRO2); dim3 KLCDNFKVG038Q4OHINGF34(64, 4, 1); dim3 KSLJNV830Q49EZSDKLNP2Q3( (ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x, (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y, (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z); float objCntIdxX = (static_cast<float>(CMNOI4BH3958BTUJ23) - 1.0f) * 0.5f - imgXCenter / dx; float objCntIdxY = (static_cast<float>(QAIWLRUW4W) - 1.0f) * 0.5f - imgYCenter / dx; float objCntIdxZ = (static_cast<float>(VMN0348HGRO2) - 1.0f) * 0.5f - imgZCenter / dz; DD3Proj_branches_ker << <KSLJNV830Q49EZSDKLNP2Q3, KLCDNFKVG038Q4OHINGF34 >> >( thrust::raw_pointer_cast(&d_____XCV[0]), 
thrust::raw_pointer_cast(&dVNNR03ASDQ234RA___[0]), x0, y0, z0, thrust::raw_pointer_cast(&dxds[0]), thrust::raw_pointer_cast(&dyds[0]), thrust::raw_pointer_cast(&dbxds[0]), thrust::raw_pointer_cast(&dbyds[0]), thrust::raw_pointer_cast(&dbzds[0]), thrust::raw_pointer_cast(&cossinZT[0]), objCntIdxX, objCntIdxY, objCntIdxZ, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, ZXMCKLNAESD2038EIWONVF, dx, dz); thrust::copy(d_____XCV.begin(), d_____XCV.end(), h_____XCV); hcossinZT.clear(); cossinZT.clear(); hxds.clear(); hyds.clear(); hzds.clear(); bxds.clear(); byds.clear(); bzds.clear(); dxds.clear(); dyds.clear(); dzds.clear(); dbxds.clear(); dbyds.clear(); dbzds.clear(); d_____XCV.clear(); dVNNR03ASDQ234RA___.clear(); } void DD3Proj_gpu( float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* hVNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum, int _____XCVMode) { switch(_____XCVMode) { case 0: DD3_gpu_proj_branchless_sat2d(x0, y0, z0,CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds,yds, zds, imgXCenter,imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; case 1: ______XCKMVN840WNJK9Q02HJIFKS(x0, y0, z0,CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds,yds, zds, imgXCenter,imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; case 2: DD3_gpu_proj_doubleprecisionbranchless(x0, y0, z0,CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds,yds, zds, imgXCenter,imgYCenter, imgZCenter, hangs, hzPos, 
ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; case 3: break; case 4: DD3Proj_branches(x0, y0, z0, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds, yds, zds, imgXCenter, imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; default: DD3_gpu_proj_branchless_sat2d(x0, y0, z0,CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds,yds, zds, imgXCenter,imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; } }
8c6b1df0462b0443196fa753e63b76a651c687a8.cu
#include "utilities.cuh" #include "DD3_GPU_proj.h" #define VAMDSN3409UOJIFK 32 #define KLZSDHNQ04JAIO 8 #define SKLJDFNGHW9835GJK35TG 1 template<typename VNKJH5IORTN, typename VNJEG9485BNIJLIHNW3828934> __global__ void NKCMVN934Q0UFK9340Q345(VNKJH5IORTN* MCVBKLNFGEO7UDNK92034, VNJEG9485BNIJLIHNW3828934* out_ZXY, VNJEG9485BNIJLIHNW3828934* VHNOREH9384HRGUAIQ121, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2) { int idz = threadIdx.x + blockIdx.x * blockDim.x; int idx = threadIdx.y + blockIdx.y * blockDim.y; int idy = threadIdx.z + blockIdx.z * blockDim.z; if(idx < CMNOI4BH3958BTUJ23 && idy < QAIWLRUW4W && idz < VMN0348HGRO2) { int i = (idy * CMNOI4BH3958BTUJ23 + idx) * VMN0348HGRO2 + idz; int ni = (idy * (CMNOI4BH3958BTUJ23 + 1) + (idx + 1)) * (VMN0348HGRO2 + 1) + idz + 1; int nj = (idx * (QAIWLRUW4W + 1) + (idy + 1)) * (VMN0348HGRO2 + 1) + idz + 1; out_ZXY[ni] = MCVBKLNFGEO7UDNK92034[i]; VHNOREH9384HRGUAIQ121[nj] = MCVBKLNFGEO7UDNK92034[i]; } } template<typename VNKJH5IORTN, typename VNJEG9485BNIJLIHNW3828934> __global__ void CVNOU3H4409IJIFEASD12(VNKJH5IORTN* in, VNJEG9485BNIJLIHNW3828934* out, int N, int VMN0348HGRO2) { int zi = threadIdx.x + blockIdx.x * blockDim.x; if(zi < VMN0348HGRO2) { out[zi] = in[zi]; for(int i = 1;i<N;++i) { out[i * VMN0348HGRO2 + zi] = out[(i - 1) * VMN0348HGRO2 + zi] + in[i * VMN0348HGRO2 + zi]; } } } template<typename VNKJH5IORTN, typename VNJEG9485BNIJLIHNW3828934> __global__ void DSKLVN083Q4HIRKSKDLHNF89WHEF(VNKJH5IORTN* in, VNJEG9485BNIJLIHNW3828934* out, int N, int VMN0348HGRO2) { int xyi = threadIdx.x + blockIdx.x * blockDim.x; if(xyi < N) { out[xyi * VMN0348HGRO2] = in[xyi * VMN0348HGRO2]; for(int ii = 1; ii < VMN0348HGRO2; ++ii) { out[xyi * VMN0348HGRO2 + ii] = out[xyi * VMN0348HGRO2 + ii - 1] + in[xyi * VMN0348HGRO2 + ii]; } } } template<> __global__ void DSKLVN083Q4HIRKSKDLHNF89WHEF(double* in, int2* out, int N, int VMN0348HGRO2) { int xyi = threadIdx.x + blockIdx.x * blockDim.x; if(xyi < N) { double temp = in[xyi * 
VMN0348HGRO2]; out[xyi * VMN0348HGRO2] = make_int2(__double2loint(temp),__double2hiint(temp)); double temp2 = 0; for(int ii = 1; ii < VMN0348HGRO2; ++ii) { temp2 = temp + in[xyi * VMN0348HGRO2 + ii]; out[xyi * VMN0348HGRO2 + ii] = make_int2(__double2loint(temp2),__double2hiint(temp2)); temp = temp2; } } } template<typename T> __global__ void VNLJBN304IORNFK9023JOVKSLX(T* _____XCV, int VMN0348HGRO2, int N) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < N) { int CSVNKLDNFGDNFQ = idx * VMN0348HGRO2; for(int ii = 1; ii < VMN0348HGRO2; ++ii) { _____XCV[CSVNKLDNFGDNFQ + ii] = _____XCV[CSVNKLDNFGDNFQ + ii] + _____XCV[CSVNKLDNFGDNFQ + ii - 1]; } } } __global__ void VBDKL9304URGJFIDKAS12(float* _____XCV, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, int ZXMCKLNAESD2038EIWONVF) { int idv = threadIdx.x + blockIdx.x * blockDim.x; int pIdx = threadIdx.y + blockIdx.y * blockDim.y; if (idv < ASD23MZNCDS23890I && pIdx < ZXMCKLNAESD2038EIWONVF) { int ZXCVBNJASDFWasDF = pIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS * ASD23MZNCDS23890I + idv; for(int ii = 1; ii < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS; ++ii) { _____XCV[ZXCVBNJASDFWasDF + ii * ASD23MZNCDS23890I] = _____XCV[ZXCVBNJASDFWasDF + ii * ASD23MZNCDS23890I] + _____XCV[ZXCVBNJASDFWasDF + (ii - 1) * ASD23MZNCDS23890I]; } } } void ______XCNV9340KL9340KFDL(float* hVNNR03ASDQ234RA___, thrust::device_vector<float>& ZXY, thrust::device_vector<float>& ZYX, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2) { const int siz = CMNOI4BH3958BTUJ23 * QAIWLRUW4W * VMN0348HGRO2; const int nsiz_ZXY = (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1) * QAIWLRUW4W; const int nsiz_ZYX = (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1) * CMNOI4BH3958BTUJ23; ZXY.resize(nsiz_ZXY); ZYX.resize(nsiz_ZYX); thrust::device_vector<float> VNNR03ASDQ234RA___(hVNNR03ASDQ234RA___,hVNNR03ASDQ234RA___ + siz); dim3 KLCDNFKVG038Q4OHINGF34(64,16,1); dim3 KSLJNV830Q49EZSDKLNP2Q3( (VMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x, 
(CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y, (QAIWLRUW4W + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z); NKCMVN934Q0UFK9340Q345<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>(thrust::raw_pointer_cast(&VNNR03ASDQ234RA___[0]), thrust::raw_pointer_cast(&ZXY[0]), thrust::raw_pointer_cast(&ZYX[0]), CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2); VNNR03ASDQ234RA___.clear(); const int nVMN0348HGRO2 = VMN0348HGRO2 + 1; const int nCMNOI4BH3958BTUJ23 = CMNOI4BH3958BTUJ23 + 1; const int nQAIWLRUW4W = QAIWLRUW4W + 1; KLCDNFKVG038Q4OHINGF34.x = 32; KLCDNFKVG038Q4OHINGF34.y = 1; KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (nCMNOI4BH3958BTUJ23 * QAIWLRUW4W + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = 1; KSLJNV830Q49EZSDKLNP2Q3.z = 1; VNLJBN304IORNFK9023JOVKSLX<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>(thrust::raw_pointer_cast(&ZXY[0]), nVMN0348HGRO2, nCMNOI4BH3958BTUJ23 * QAIWLRUW4W); KLCDNFKVG038Q4OHINGF34.x = 64; KLCDNFKVG038Q4OHINGF34.y = 16; KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (nVMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = (QAIWLRUW4W + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y; KSLJNV830Q49EZSDKLNP2Q3.z = 1; VBDKL9304URGJFIDKAS12<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>(thrust::raw_pointer_cast(&ZXY[0]), nCMNOI4BH3958BTUJ23, nVMN0348HGRO2, QAIWLRUW4W); KLCDNFKVG038Q4OHINGF34.x = 32; KLCDNFKVG038Q4OHINGF34.y = 1; KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (nQAIWLRUW4W * CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = 1; KSLJNV830Q49EZSDKLNP2Q3.z = 1; VNLJBN304IORNFK9023JOVKSLX<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>(thrust::raw_pointer_cast(&ZYX[0]), nVMN0348HGRO2, nQAIWLRUW4W * CMNOI4BH3958BTUJ23); KLCDNFKVG038Q4OHINGF34.x = 64; KLCDNFKVG038Q4OHINGF34.y = 16; 
KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (nVMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = (CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y; KSLJNV830Q49EZSDKLNP2Q3.z = 1; VBDKL9304URGJFIDKAS12<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>(thrust::raw_pointer_cast(&ZYX[0]), nQAIWLRUW4W, nVMN0348HGRO2, CMNOI4BH3958BTUJ23); } template<typename T> void XCNVXCVA4ERHN3840EIOGNKFNASDFLJ( cudaTextureObject_t& texObj, cudaArray* d______XCVArray, int VNIRONV84WOHIDNSASDKL8934, int QWEIOQUWOISDKLJ23, int BCVMNREJDFK42W35, T* SDKLHFW9024U3TRJPIF, cudaMemcpyKind VW34, cudaTextureAddressMode addressMode, cudaTextureFilterMode textureFilterMode, cudaTextureReadMode textureReadMode, bool isNormalized) { cudaExtent _____XCVSize; _____XCVSize.width = VNIRONV84WOHIDNSASDKL8934; _____XCVSize.height = QWEIOQUWOISDKLJ23; _____XCVSize.depth = BCVMNREJDFK42W35; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<T>(); cudaMalloc3DArray(&d______XCVArray, &channelDesc, _____XCVSize); cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr( (void*)SDKLHFW9024U3TRJPIF, _____XCVSize.width * sizeof(T), _____XCVSize.width, _____XCVSize.height); copyParams.dstArray = d______XCVArray; copyParams.extent = _____XCVSize; copyParams.kind = VW34; cudaMemcpy3D(&copyParams); cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = d______XCVArray; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = addressMode; texDesc.addressMode[1] = addressMode; texDesc.addressMode[2] = addressMode; texDesc.filterMode = textureFilterMode; texDesc.readMode = textureReadMode; texDesc.normalizedCoords = isNormalized; CUDA_SAFE_CALL(cudaCreateTextureObject(&texObj, &resDesc, &texDesc,nullptr)); } void VNSDKJFN0834HSDFHKJSDHF(cudaTextureObject_t& texObj, cudaArray* d_array) { 
cudaDestroyTextureObject(texObj); cudaFreeArray(d_array); } __global__ void ________A( cudaTextureObject_t VNNR03ASDQ234RA___Tex1, cudaTextureObject_t VNNR03ASDQ234RA___Tex2, double* proj, double3 s, const double3* __restrict cossinZT, const double* __restrict xds, const double* __restrict yds, const double* __restrict zds, const double* __restrict bxds, const double* __restrict byds, const double* __restrict bzds, double3 objCntIdx, double dx, double dz, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, int ZXMCKLNAESD2038EIWONVF) { int detIdV = threadIdx.x + blockIdx.x * blockDim.x; int detIdU = threadIdx.y + blockIdx.y * blockDim.y; int angIdx = threadIdx.z + blockIdx.z * blockDim.z; __shared__ double _xds[KLZSDHNQ04JAIO]; __shared__ double _yds[KLZSDHNQ04JAIO]; _xds[threadIdx.y] = xds[detIdU]; _yds[threadIdx.y] = yds[detIdU]; __syncthreads(); if(detIdU < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS && detIdV < ASD23MZNCDS23890I && angIdx < ZXMCKLNAESD2038EIWONVF) { double3 dir = cossinZT[angIdx]; double3 cursour = make_double3( s.x * dir.x - s.y * dir.y, s.x * dir.y + s.y * dir.x, s.z + dir.z); s = cossinZT[angIdx]; double summ = _xds[threadIdx.y] * s.x - _yds[threadIdx.y] * s.y; double obj = _xds[threadIdx.y] * s.y + _yds[threadIdx.y] * s.x; double realL = bxds[detIdU]; double realR = byds[detIdU]; double realU = bxds[detIdU + 1]; double realD = byds[detIdU + 1]; double2 curDetL = make_double2( realL * s.x - realR * s.y, realL * s.y + realR * s.x); double2 curDetR = make_double2( realU * s.x - realD * s.y, realU * s.y + realD * s.x); double4 curDet = make_double4(summ,obj,bzds[detIdV] + s.z,bzds[detIdV+1] + s.z); dir = normalize(make_double3( summ, obj, zds[detIdV] + s.z) - cursour); summ = 0; obj = 0; double intersectLength, intersectQWEIOQUWOISDKLJ23; double invdz = 1.0 / dz; double invdx = 1.0 / dx; double factL(1.0f); double factR(1.0f); double factU(1.0f); double factD(1.0f); double constVal = 0; int crealD, crealR, 
crealU, crealL; int frealD, frealR, frealU, frealL; if(abs(s.x) <= abs(s.y)) { summ = 0; factL = (curDetL.y - cursour.y) / (curDetL.x - cursour.x); factR = (curDetR.y - cursour.y) / (curDetR.x - cursour.x); factU = (curDet.w - cursour.z) / (curDet.x - cursour.x); factD = (curDet.z - cursour.z) / (curDet.x - cursour.x); constVal = dx * dx * dz / (abs(dir.x)); #pragma unroll for(int ii = 0; ii < CMNOI4BH3958BTUJ23; ii++) { obj = (ii - objCntIdx.x) * dx; realL = (obj - curDetL.x) * factL + curDetL.y; realR = (obj - curDetR.x) * factR + curDetR.y; realU = (obj - curDet.x) * factU + curDet.w; realD = (obj - curDet.x) * factD + curDet.z; intersectLength = realR - realL; intersectQWEIOQUWOISDKLJ23 = realU - realD; realD = realD * invdz + objCntIdx.z + 1; realR = realR * invdx + objCntIdx.y + 1; realU = realU * invdz + objCntIdx.z + 1; realL = realL * invdx + objCntIdx.y + 1; crealD = ceil(realD); crealR = ceil(realR); crealU = ceil(realU); crealL = ceil(realL); frealD = floor(realD); frealR = floor(realR); frealU = floor(realU); frealL = floor(realL); summ += (bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealD, frealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealD, crealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealD, frealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealD, crealL, ii + 0.5), realL - frealL,realD - frealD) + bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealU, frealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealU, crealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealU, frealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealU, crealR, ii + 0.5), realR - frealR,realU - frealU) - bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealD, frealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealD, crealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealD, frealR, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealD, crealR, ii + 0.5), realR - frealR,realD - frealD) - bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex2, 
frealU, frealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, frealU, crealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealU, frealL, ii + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex2, crealU, crealL, ii + 0.5), realL - frealL,realU - frealU))/ (intersectLength * intersectQWEIOQUWOISDKLJ23); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * constVal; } else { summ = 0; factL = (curDetL.x - cursour.x) / (curDetL.y - cursour.y); factR = (curDetR.x - cursour.x) / (curDetR.y - cursour.y); factU = (curDet.w - cursour.z) / (curDet.y - cursour.y); factD = (curDet.z - cursour.z) / (curDet.y - cursour.y); constVal = dx * dx * dz / (abs(dir.y)); #pragma unroll for(int jj = 0; jj < QAIWLRUW4W; jj++) { obj = (jj - objCntIdx.y) * dx; realL = (obj - curDetL.y) * factL + curDetL.x; realR = (obj - curDetR.y) * factR + curDetR.x; realU = (obj - curDet.y) * factU + curDet.w; realD = (obj - curDet.y) * factD + curDet.z; intersectLength = realR - realL; intersectQWEIOQUWOISDKLJ23 = realU - realD; realD = realD * invdz + objCntIdx.z + 1; realR = realR * invdx + objCntIdx.x + 1; realU = realU * invdz + objCntIdx.z + 1; realL = realL * invdx + objCntIdx.x + 1; crealD = ceil(realD); crealR = ceil(realR); crealU = ceil(realU); crealL = ceil(realL); frealD = floor(realD); frealR = floor(realR); frealU = floor(realU); frealL = floor(realL); summ += (bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealD, frealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealD, crealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealD, frealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealD, crealL, jj + 0.5), realL - frealL,realD - frealD) + bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealU, frealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealU, crealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealU, frealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealU, crealR, jj + 0.5), realR - frealR,realU - frealU) 
- bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealD, frealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealD, crealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealD, frealR, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealD, crealR, jj + 0.5), realR - frealR,realD - frealD) - bilerp( tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealU, frealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, frealU, crealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealU, frealL, jj + 0.5), tex3D<int2>(VNNR03ASDQ234RA___Tex1, crealU, crealL, jj + 0.5), realL - frealL,realU - frealU)) / (intersectLength * intersectQWEIOQUWOISDKLJ23); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * constVal; } } } void DD3_gpu_proj_doubleprecisionbranchless( float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* VNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum) { for(int ii = 0; ii != CMNOI4BH3958BTUJ23 * QAIWLRUW4W; ++ii) { byte v = mask[ii]; for(int jj = 0; jj != VMN0348HGRO2; ++jj) { VNNR03ASDQ234RA___[ii * VMN0348HGRO2 + jj] = VNNR03ASDQ234RA___[ii * VMN0348HGRO2 + jj] * v; } } float* bxds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* byds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* bzds = new float[ASD23MZNCDS23890I + 1]; DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, xds, bxds); DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, yds, byds); DD3Boundaries(ASD23MZNCDS23890I + 1, zds, bzds); CUDA_SAFE_CALL(cudaSetDevice(gpunum)); CUDA_SAFE_CALL(cudaDeviceReset()); cudaStream_t streams[4]; CUDA_SAFE_CALL(cudaStreamCreate(&streams[0])); CUDA_SAFE_CALL(cudaStreamCreate(&streams[1])); 
CUDA_SAFE_CALL(cudaStreamCreate(&streams[2])); CUDA_SAFE_CALL(cudaStreamCreate(&streams[3])); int TOTVN = CMNOI4BH3958BTUJ23 * QAIWLRUW4W * VMN0348HGRO2; double objCntIdxX = (CMNOI4BH3958BTUJ23 - 1.0) * 0.5 - imgXCenter / dx; double objCntIdxY = (QAIWLRUW4W - 1.0) * 0.5 - imgYCenter / dx; double objCntIdxZ = (VMN0348HGRO2 - 1.0) * 0.5 - imgZCenter / dz; thrust::device_vector<float> in(VNNR03ASDQ234RA___, VNNR03ASDQ234RA___ + TOTVN); thrust::device_vector<double> MCVBKLNFGEO7UDNK92034((VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1) * QAIWLRUW4W, 0); thrust::device_vector<double> in_ZYX((VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1) * CMNOI4BH3958BTUJ23, 0); dim3 KLCDNFKVG038Q4OHINGF34(64,16,1); dim3 KSLJNV830Q49EZSDKLNP2Q3( (VMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x, (CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y, (QAIWLRUW4W + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z); NKCMVN934Q0UFK9340Q345<float,double><<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>( thrust::raw_pointer_cast(&in[0]), thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034[0]), thrust::raw_pointer_cast(&in_ZYX[0]), CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2); in.clear(); thrust::device_vector<double> MCVBKLNFGEO7UDNK92034_summ1((VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1) * QAIWLRUW4W,0); thrust::device_vector<int2> MCVBKLNFGEO7UDNK92034_summ((VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1) * QAIWLRUW4W); KLCDNFKVG038Q4OHINGF34.x = 64;KLCDNFKVG038Q4OHINGF34.y = 1;KLCDNFKVG038Q4OHINGF34.z = 1; KSLJNV830Q49EZSDKLNP2Q3.x = (VMN0348HGRO2 + KLCDNFKVG038Q4OHINGF34.x) / KLCDNFKVG038Q4OHINGF34.x;KSLJNV830Q49EZSDKLNP2Q3.y = 1;KSLJNV830Q49EZSDKLNP2Q3.z = 1; dim3 KLCDNFKVG038Q4OHINGF342(64); dim3 KSLJNV830Q49EZSDKLNP2Q32((QAIWLRUW4W + KLCDNFKVG038Q4OHINGF342.x) / KLCDNFKVG038Q4OHINGF342.x); dim3 KLCDNFKVG038Q4OHINGF343(64); dim3 KSLJNV830Q49EZSDKLNP2Q33((CMNOI4BH3958BTUJ23 + KLCDNFKVG038Q4OHINGF343.x) / KLCDNFKVG038Q4OHINGF343.x); for(int jj = 
0; jj != QAIWLRUW4W; ++jj) { CVNOU3H4409IJIFEASD12<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34,0,streams[0]>>>( thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034[0]) + jj * (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1), thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034_summ1[0]) + jj * (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1), CMNOI4BH3958BTUJ23 + 1, VMN0348HGRO2 + 1); DSKLVN083Q4HIRKSKDLHNF89WHEF<<<KSLJNV830Q49EZSDKLNP2Q32,KLCDNFKVG038Q4OHINGF342,0,streams[0]>>>( thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034_summ1[0]) + jj * (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1), thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034_summ[0]) + jj * (VMN0348HGRO2 + 1) * (CMNOI4BH3958BTUJ23 + 1), CMNOI4BH3958BTUJ23 + 1, VMN0348HGRO2 + 1); } MCVBKLNFGEO7UDNK92034.clear(); MCVBKLNFGEO7UDNK92034_summ1.clear(); cudaArray* d_VNNR03ASDQ234RA___umeArray1 = nullptr; cudaTextureObject_t texObj1; XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<int2>(texObj1, d_VNNR03ASDQ234RA___umeArray1,VMN0348HGRO2 + 1, CMNOI4BH3958BTUJ23+1,QAIWLRUW4W, thrust::raw_pointer_cast(&MCVBKLNFGEO7UDNK92034_summ[0]), cudaMemcpyDeviceToDevice, cudaAddressModeClamp,cudaFilterModePoint, cudaReadModeElementType,false); MCVBKLNFGEO7UDNK92034_summ.clear(); thrust::device_vector<double> in_ZYX_summ1((VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1) * CMNOI4BH3958BTUJ23, 0); thrust::device_vector<int2> in_ZYX_summ((VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1) * CMNOI4BH3958BTUJ23); for(int ii = 0; ii != CMNOI4BH3958BTUJ23; ++ii) { CVNOU3H4409IJIFEASD12<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34,0,streams[1]>>>( thrust::raw_pointer_cast(&in_ZYX[0]) + ii * (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1), thrust::raw_pointer_cast(&in_ZYX_summ1[0]) + ii * (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1), QAIWLRUW4W + 1, VMN0348HGRO2 + 1); DSKLVN083Q4HIRKSKDLHNF89WHEF<<<KSLJNV830Q49EZSDKLNP2Q33,KLCDNFKVG038Q4OHINGF343,0,streams[1]>>>( thrust::raw_pointer_cast(&in_ZYX_summ1[0]) + ii * (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1), thrust::raw_pointer_cast(&in_ZYX_summ[0]) + ii 
* (VMN0348HGRO2 + 1) * (QAIWLRUW4W + 1), QAIWLRUW4W + 1, VMN0348HGRO2 + 1); } in_ZYX.clear(); in_ZYX_summ1.clear(); cudaArray* d_VNNR03ASDQ234RA___umeArray2 = nullptr; cudaTextureObject_t texObj2; XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<int2>(texObj2, d_VNNR03ASDQ234RA___umeArray2,VMN0348HGRO2 + 1, QAIWLRUW4W+1,CMNOI4BH3958BTUJ23, thrust::raw_pointer_cast(&in_ZYX_summ[0]), cudaMemcpyDeviceToDevice, cudaAddressModeClamp,cudaFilterModePoint, cudaReadModeElementType,false); in_ZYX_summ.clear(); thrust::device_vector<double> _____XCV(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS * ASD23MZNCDS23890I * ZXMCKLNAESD2038EIWONVF, 0); thrust::device_vector<double> angs(hangs, hangs + ZXMCKLNAESD2038EIWONVF); thrust::device_vector<double> zPos(hzPos, hzPos + ZXMCKLNAESD2038EIWONVF); thrust::device_vector<double> d_xds(xds, xds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::device_vector<double> d_yds(yds, yds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::device_vector<double> d_zds(zds, zds + ASD23MZNCDS23890I); thrust::device_vector<double> d_bxds(bxds, bxds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1); thrust::device_vector<double> d_byds(byds, byds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1); thrust::device_vector<double> d_bzds(bzds, bzds + ASD23MZNCDS23890I + 1); thrust::device_vector<double3> cossinZT(ZXMCKLNAESD2038EIWONVF); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(angs.begin(),zPos.begin())), thrust::make_zip_iterator(thrust::make_tuple(angs.end(),zPos.end())), cossinZT.begin(),CTMBIR::ConstantForBackProjection<double>(x0,y0,z0)); dim3 KLCDNFKVG038Q4OHINGF34c(64,16,1); dim3 KSLJNV830Q49EZSDKLNP2Q3c( (ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34c.x) / KLCDNFKVG038Q4OHINGF34c.x, (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34c.y) / KLCDNFKVG038Q4OHINGF34c.y, (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34c.z - 1) / KLCDNFKVG038Q4OHINGF34c.z); KLCDNFKVG038Q4OHINGF34.x = VAMDSN3409UOJIFK; KLCDNFKVG038Q4OHINGF34.y = KLZSDHNQ04JAIO; KLCDNFKVG038Q4OHINGF34.z = 
SKLJDFNGHW9835GJK35TG; KSLJNV830Q49EZSDKLNP2Q3.x = (ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y; KSLJNV830Q49EZSDKLNP2Q3.z = (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z; ________A<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>(texObj1,texObj2, thrust::raw_pointer_cast(&_____XCV[0]),make_double3(x0,y0,z0), thrust::raw_pointer_cast(&cossinZT[0]), thrust::raw_pointer_cast(&d_xds[0]), thrust::raw_pointer_cast(&d_yds[0]), thrust::raw_pointer_cast(&d_zds[0]), thrust::raw_pointer_cast(&d_bxds[0]), thrust::raw_pointer_cast(&d_byds[0]), thrust::raw_pointer_cast(&d_bzds[0]), make_double3(objCntIdxX, objCntIdxY,objCntIdxZ), dx, dz, CMNOI4BH3958BTUJ23, QAIWLRUW4W, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, ZXMCKLNAESD2038EIWONVF); thrust::copy(_____XCV.begin(),_____XCV.end(),h_____XCV); CUDA_SAFE_CALL(cudaDestroyTextureObject(texObj1)); CUDA_SAFE_CALL(cudaDestroyTextureObject(texObj2)); VNSDKJFN0834HSDFHKJSDHF(texObj1, d_VNNR03ASDQ234RA___umeArray1); VNSDKJFN0834HSDFHKJSDHF(texObj2, d_VNNR03ASDQ234RA___umeArray2); _____XCV.clear(); angs.clear(); zPos.clear(); d_xds.clear(); d_yds.clear(); d_zds.clear(); d_bxds.clear(); d_byds.clear(); d_bzds.clear(); delete[] bxds; delete[] byds; delete[] bzds; } __global__ void ZCNKLXDVN3084ORHINJK9Q304UTIGNFK( cudaTextureObject_t VNNR03ASDQ234RA___Tex, float* proj, float3 s, float* d_xds, float* d_yds, float* d_zds, float3* cossinT, float3 objCntIdx, float dx, float dz, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, int ZXMCKLNAESD2038EIWONVF) { int detIdV = threadIdx.x + blockIdx.x * blockDim.x; int detIdU = threadIdx.y + blockIdx.y * blockDim.y; int angIdx = threadIdx.z + blockIdx.z * blockDim.z; if(detIdV < ASD23MZNCDS23890I && detIdU < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS && angIdx < 
ZXMCKLNAESD2038EIWONVF) { float3 cossin = cossinT[angIdx]; float3 cursour = make_float3( s.x * cossin.x - s.y * cossin.y, s.x * cossin.y + s.y * cossin.x, s.z + cossin.z); float summ = d_xds[detIdU]; float obj = d_yds[detIdU]; float idx = d_zds[detIdV]; float3 curDet = make_float3( summ * cossin.x - obj * cossin.y, summ * cossin.y + obj * cossin.x, idx + cossin.z); float3 dir = normalize(curDet - cursour); summ = 0; obj = 0; float idxZ; if(fabsf(cossin.x) <= fabsf(cossin.y)) { summ = 0; for(int ii = 0; ii < CMNOI4BH3958BTUJ23; ++ii) { obj = (ii - objCntIdx.x) * dx; idx = (obj - curDet.x) / dir.x * dir.y + curDet.y; idxZ = (obj - curDet.x) / dir.x * dir.z + curDet.z; idx = idx / dx + objCntIdx.y + 0.5; idxZ = idxZ / dz + objCntIdx.z + 0.5; summ += tex3D<float>(VNNR03ASDQ234RA___Tex, idxZ, ii + 0.5f, idx); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * dx / fabsf(dir.x); } else { summ = 0; for(int jj = 0; jj < QAIWLRUW4W; ++jj) { obj = (jj - objCntIdx.y) * dx; idx = (obj - curDet.y) / dir.y * dir.x + curDet.x; idxZ = (obj - curDet.y) / dir.y * dir.z + curDet.z; idx = idx / dx + objCntIdx.x + 0.5; idxZ = idxZ / dz + objCntIdx.z + 0.5; summ += tex3D<float>(VNNR03ASDQ234RA___Tex, idxZ, idx, jj + 0.5f); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * dx / fabsf(dir.y); } } } void ______XCKMVN840WNJK9Q02HJIFKS( float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* hVNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum) { for(int ii = 0; ii != CMNOI4BH3958BTUJ23 * QAIWLRUW4W; ++ii) { byte v = mask[ii]; for(int jj = 0; jj != VMN0348HGRO2; ++jj) { hVNNR03ASDQ234RA___[ii 
* VMN0348HGRO2 + jj] = hVNNR03ASDQ234RA___[ii * VMN0348HGRO2 + jj] * v; } } float* bxds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* byds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* bzds = new float[ASD23MZNCDS23890I + 1]; DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, xds, bxds); DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, yds, byds); DD3Boundaries(ASD23MZNCDS23890I + 1, zds, bzds); CUDA_SAFE_CALL(cudaSetDevice(gpunum)); CUDA_SAFE_CALL(cudaDeviceReset()); const int TOTVN = CMNOI4BH3958BTUJ23 * QAIWLRUW4W * VMN0348HGRO2; float objCntIdxX = (CMNOI4BH3958BTUJ23 - 1.0) * 0.5 - imgXCenter / dx; float objCntIdxY = (QAIWLRUW4W - 1.0) * 0.5 - imgYCenter / dx; float objCntIdxZ = (VMN0348HGRO2 - 1.0) * 0.5 - imgZCenter / dz; d_vec_t VNNR03ASDQ234RA___(hVNNR03ASDQ234RA___, hVNNR03ASDQ234RA___ + TOTVN); cudaTextureObject_t texObj; cudaArray* d_VNNR03ASDQ234RA___umeArray = nullptr; XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<float>(texObj,d_VNNR03ASDQ234RA___umeArray, VMN0348HGRO2,CMNOI4BH3958BTUJ23,QAIWLRUW4W, thrust::raw_pointer_cast(&VNNR03ASDQ234RA___[0]), cudaMemcpyDeviceToDevice, cudaAddressModeBorder, cudaFilterModeLinear, cudaReadModeElementType,false); d_vec_t _____XCV(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS * ASD23MZNCDS23890I * ZXMCKLNAESD2038EIWONVF, 0); d_vec_t angs(hangs,hangs + ZXMCKLNAESD2038EIWONVF); d_vec_t zPos(hzPos, hzPos + ZXMCKLNAESD2038EIWONVF); d_vec_t d_xds(xds, xds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); d_vec_t d_yds(yds, yds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); d_vec_t d_zds(zds, zds + ASD23MZNCDS23890I); thrust::device_vector<float3> cossinZT(ZXMCKLNAESD2038EIWONVF); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(angs.begin(), zPos.begin())), thrust::make_zip_iterator(thrust::make_tuple(angs.end(), zPos.end())), cossinZT.begin(),CTMBIR::ConstantForBackProjection<float>(x0,y0,z0)); dim3 KLCDNFKVG038Q4OHINGF34(64,16,1); dim3 KSLJNV830Q49EZSDKLNP2Q3((ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34.x - 1) / 
KLCDNFKVG038Q4OHINGF34.x, (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y, (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z); ZCNKLXDVN3084ORHINJK9Q304UTIGNFK<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>(texObj,thrust::raw_pointer_cast(&_____XCV[0]), make_float3(x0,y0,z0), thrust::raw_pointer_cast(&d_xds[0]), thrust::raw_pointer_cast(&d_yds[0]), thrust::raw_pointer_cast(&d_zds[0]), thrust::raw_pointer_cast(&cossinZT[0]), make_float3(objCntIdxX,objCntIdxY,objCntIdxZ), dx, dz, CMNOI4BH3958BTUJ23, QAIWLRUW4W, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, ZXMCKLNAESD2038EIWONVF); thrust::copy(_____XCV.begin(),_____XCV.end(),h_____XCV); VNSDKJFN0834HSDFHKJSDHF(texObj,d_VNNR03ASDQ234RA___umeArray); _____XCV.clear(); angs.clear(); zPos.clear(); d_xds.clear(); d_yds.clear(); d_zds.clear(); cossinZT.clear(); delete[] bxds; delete[] byds; delete[] bzds; } __global__ void ______OPRGDMFKLBQ93ZXC893457( cudaTextureObject_t VNNR03ASDQ234RA___Tex1, cudaTextureObject_t VNNR03ASDQ234RA___Tex2, float* proj, float3 s, const float3* __restrict cossinZT, const float* __restrict xds, const float* __restrict yds, const float* __restrict zds, const float* __restrict bxds, const float* __restrict byds, const float* __restrict bzds, float3 objCntIdx, float dx, float dz, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, int ZXMCKLNAESD2038EIWONVF) { int detIdV = threadIdx.x + blockIdx.x * blockDim.x; int detIdU = threadIdx.y + blockIdx.y * blockDim.y; int angIdx = threadIdx.z + blockIdx.z * blockDim.z; __shared__ float _xds[KLZSDHNQ04JAIO]; __shared__ float _yds[KLZSDHNQ04JAIO]; _xds[threadIdx.y] = xds[detIdU]; _yds[threadIdx.y] = yds[detIdU]; __syncthreads(); if(detIdU < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS && detIdV < ASD23MZNCDS23890I && angIdx < ZXMCKLNAESD2038EIWONVF) { float3 dir = cossinZT[angIdx]; float3 cursour = make_float3( s.x * dir.x - s.y * 
dir.y, s.x * dir.y + s.y * dir.x, s.z + dir.z); s = cossinZT[angIdx]; float summ = _xds[threadIdx.y] * s.x - _yds[threadIdx.y] * s.y; float obj = _xds[threadIdx.y] * s.y + _yds[threadIdx.y] * s.x; float realL = bxds[detIdU]; float realR = byds[detIdU]; float realU = bxds[detIdU + 1]; float realD = byds[detIdU + 1]; float2 curDetL = make_float2( realL * s.x - realR * s.y, realL * s.y + realR * s.x); float2 curDetR = make_float2( realU * s.x - realD * s.y, realU * s.y + realD * s.x); float4 curDet = make_float4(summ,obj,bzds[detIdV] + s.z,bzds[detIdV+1] + s.z); dir = normalize(make_float3( summ, obj, zds[detIdV] + s.z) - cursour); summ = 0; obj = 0; float intersectLength, intersectQWEIOQUWOISDKLJ23; float invdz = 1.0 / dz; float invdx = 1.0 / dx; float factL(1.0f); float factR(1.0f); float factU(1.0f); float factD(1.0f); float constVal = 0; if(fabsf(s.x) <= fabsf(s.y)) { summ = 0; factL = (curDetL.y - cursour.y) / (curDetL.x - cursour.x); factR = (curDetR.y - cursour.y) / (curDetR.x - cursour.x); factU = (curDet.w - cursour.z) / (curDet.x - cursour.x); factD = (curDet.z - cursour.z) / (curDet.x - cursour.x); constVal = dx * dx * dz / (fabsf(dir.x)); #pragma unroll for(int ii = 0; ii < CMNOI4BH3958BTUJ23; ii++) { obj = (ii - objCntIdx.x) * dx; realL = (obj - curDetL.x) * factL + curDetL.y; realR = (obj - curDetR.x) * factR + curDetR.y; realU = (obj - curDet.x) * factU + curDet.w; realD = (obj - curDet.x) * factD + curDet.z; intersectLength = realR - realL; intersectQWEIOQUWOISDKLJ23 = realU - realD; realD = realD * invdz + objCntIdx.z + 1; realR = realR * invdx + objCntIdx.y + 1; realU = realU * invdz + objCntIdx.z + 1; realL = realL * invdx + objCntIdx.y + 1; summ += (tex3D<float>(VNNR03ASDQ234RA___Tex2, realD, realL, ii + 0.5f) + tex3D<float>(VNNR03ASDQ234RA___Tex2, realU, realR, ii + 0.5f) - (tex3D<float>(VNNR03ASDQ234RA___Tex2, realD, realR, ii + 0.5f) + tex3D<float>(VNNR03ASDQ234RA___Tex2, realU, realL, ii + 0.5f)) ) / (intersectLength * 
intersectQWEIOQUWOISDKLJ23); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * constVal; } else { summ = 0; factL = (curDetL.x - cursour.x) / (curDetL.y - cursour.y); factR = (curDetR.x - cursour.x) / (curDetR.y - cursour.y); factU = (curDet.w - cursour.z) / (curDet.y - cursour.y); factD = (curDet.z - cursour.z) / (curDet.y - cursour.y); constVal = dx * dx * dz / (fabsf(dir.y)); #pragma unroll for(int jj = 0; jj < QAIWLRUW4W; jj++) { obj = (jj - objCntIdx.y) * dx; realL = (obj - curDetL.y) * factL + curDetL.x; realR = (obj - curDetR.y) * factR + curDetR.x; realU = (obj - curDet.y) * factU + curDet.w; realD = (obj - curDet.y) * factD + curDet.z; intersectLength = realR - realL; intersectQWEIOQUWOISDKLJ23 = realU - realD; realD = realD * invdz + objCntIdx.z + 1; realR = realR * invdx + objCntIdx.x + 1; realU = realU * invdz + objCntIdx.z + 1; realL = realL * invdx + objCntIdx.x + 1; summ += (tex3D<float>(VNNR03ASDQ234RA___Tex1, realD,realL, jj + 0.5f) + tex3D<float>(VNNR03ASDQ234RA___Tex1, realU, realR, jj + 0.5f) - (tex3D<float>(VNNR03ASDQ234RA___Tex1, realD, realR, jj + 0.5f)+ tex3D<float>(VNNR03ASDQ234RA___Tex1, realU, realL, jj + 0.5f)) )/ (intersectLength * intersectQWEIOQUWOISDKLJ23); } __syncthreads(); proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * constVal; } } } void DD3_gpu_proj_branchless_sat2d( float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* VNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum) { for(int i = 0; i != CMNOI4BH3958BTUJ23 * QAIWLRUW4W; ++i) { byte v = mask[i]; for(int z = 0; z != VMN0348HGRO2; ++z) { VNNR03ASDQ234RA___[i * VMN0348HGRO2 + z] = 
VNNR03ASDQ234RA___[i * VMN0348HGRO2 + z] * v; } } CUDA_SAFE_CALL(cudaSetDevice(gpunum)); CUDA_SAFE_CALL(cudaDeviceReset()); float* bxds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* byds = new float[CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1]; float* bzds = new float[ASD23MZNCDS23890I + 1]; DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, xds, bxds); DD3Boundaries(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, yds, byds); DD3Boundaries(ASD23MZNCDS23890I + 1, zds, bzds); cudaStream_t streams[4]; CUDA_SAFE_CALL(cudaStreamCreate(&streams[0])); CUDA_SAFE_CALL(cudaStreamCreate(&streams[1])); CUDA_SAFE_CALL(cudaStreamCreate(&streams[2])); CUDA_SAFE_CALL(cudaStreamCreate(&streams[3])); float objCntIdxX = (CMNOI4BH3958BTUJ23 - 1.0) * 0.5 - imgXCenter / dx; float objCntIdxY = (QAIWLRUW4W - 1.0) * 0.5 - imgYCenter / dx; float objCntIdxZ = (VMN0348HGRO2 - 1.0) * 0.5 - imgZCenter / dz; thrust::device_vector<float> SATZXY; thrust::device_vector<float> SATZYX; ______XCNV9340KL9340KFDL(VNNR03ASDQ234RA___,SATZXY,SATZYX, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2); cudaTextureObject_t texObj1; cudaArray* d_VNNR03ASDQ234RA___umeArray1 = nullptr; cudaTextureObject_t texObj2; cudaArray* d_VNNR03ASDQ234RA___umeArray2 = nullptr; XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<float>(texObj1,d_VNNR03ASDQ234RA___umeArray1, VMN0348HGRO2 + 1,CMNOI4BH3958BTUJ23 + 1,QAIWLRUW4W, thrust::raw_pointer_cast(&SATZXY[0]), cudaMemcpyDeviceToDevice, cudaAddressModeClamp, cudaFilterModeLinear, cudaReadModeElementType,false); SATZXY.clear(); XCNVXCVA4ERHN3840EIOGNKFNASDFLJ<float>(texObj2,d_VNNR03ASDQ234RA___umeArray2, VMN0348HGRO2 + 1,QAIWLRUW4W + 1,CMNOI4BH3958BTUJ23, thrust::raw_pointer_cast(&SATZYX[0]), cudaMemcpyDeviceToDevice, cudaAddressModeClamp, cudaFilterModeLinear, cudaReadModeElementType,false); SATZYX.clear(); thrust::device_vector<float> _____XCV(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS * ASD23MZNCDS23890I * ZXMCKLNAESD2038EIWONVF, 0); thrust::device_vector<float> d_xds(xds, xds + 
CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::device_vector<float> d_yds(yds, yds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::device_vector<float> d_zds(zds, zds + ASD23MZNCDS23890I); thrust::device_vector<float> d_bxds(bxds, bxds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1); thrust::device_vector<float> d_byds(byds, byds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1); thrust::device_vector<float> d_bzds(bzds, bzds + ASD23MZNCDS23890I + 1); thrust::device_vector<float> angs(hangs, hangs + ZXMCKLNAESD2038EIWONVF); thrust::device_vector<float> zPos(hzPos, hzPos + ZXMCKLNAESD2038EIWONVF); thrust::device_vector<float3> cossinZT(ZXMCKLNAESD2038EIWONVF); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(angs.begin(), zPos.begin())), thrust::make_zip_iterator(thrust::make_tuple(angs.end(), zPos.end())), cossinZT.begin(),CTMBIR::ConstantForBackProjection<float>(x0,y0,z0)); dim3 KLCDNFKVG038Q4OHINGF34; dim3 KSLJNV830Q49EZSDKLNP2Q3; KLCDNFKVG038Q4OHINGF34.x = VAMDSN3409UOJIFK; KLCDNFKVG038Q4OHINGF34.y = KLZSDHNQ04JAIO; KLCDNFKVG038Q4OHINGF34.z = SKLJDFNGHW9835GJK35TG; KSLJNV830Q49EZSDKLNP2Q3.x = (ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x; KSLJNV830Q49EZSDKLNP2Q3.y = (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y; KSLJNV830Q49EZSDKLNP2Q3.z = (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z; ______OPRGDMFKLBQ93ZXC893457<<<KSLJNV830Q49EZSDKLNP2Q3,KLCDNFKVG038Q4OHINGF34>>>(texObj1,texObj2, thrust::raw_pointer_cast(&_____XCV[0]), make_float3(x0,y0,z0), thrust::raw_pointer_cast(&cossinZT[0]), thrust::raw_pointer_cast(&d_xds[0]), thrust::raw_pointer_cast(&d_yds[0]), thrust::raw_pointer_cast(&d_zds[0]), thrust::raw_pointer_cast(&d_bxds[0]), thrust::raw_pointer_cast(&d_byds[0]), thrust::raw_pointer_cast(&d_bzds[0]), make_float3(objCntIdxX, objCntIdxY,objCntIdxZ), dx, dz, CMNOI4BH3958BTUJ23, QAIWLRUW4W, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, 
ZXMCKLNAESD2038EIWONVF); thrust::copy(_____XCV.begin(),_____XCV.end(),h_____XCV); CUDA_SAFE_CALL(cudaDestroyTextureObject(texObj1)); CUDA_SAFE_CALL(cudaDestroyTextureObject(texObj2)); VNSDKJFN0834HSDFHKJSDHF(texObj1,d_VNNR03ASDQ234RA___umeArray1); VNSDKJFN0834HSDFHKJSDHF(texObj2,d_VNNR03ASDQ234RA___umeArray2); angs.clear(); zPos.clear(); _____XCV.clear(); d_xds.clear(); d_yds.clear(); d_zds.clear(); d_bxds.clear(); d_byds.clear(); d_bzds.clear(); cossinZT.clear(); delete[] bxds; delete[] byds; delete[] bzds; } template<typename T> void DD3Boundaries(int nrBoundaries, thrust::host_vector<T>& Centers, thrust::host_vector<T>& Boundaries) { int i; T* pBoundaries = &Boundaries[0]; T* pCenters = &Centers[0]; if (nrBoundaries >= 3) { *pBoundaries++ = 1.5 * *pCenters - 0.5 * *(pCenters + 1); for (i = 1; i <= (nrBoundaries - 2); i++) { *pBoundaries++ = 0.5 * *pCenters + 0.5 * *(pCenters + 1); pCenters++; } *pBoundaries = 1.5 * *pCenters - 0.5 * *(pCenters - 1); } else { *pBoundaries = *pCenters - 0.5; *(pBoundaries + 1) = *pCenters + 0.5; } } __global__ void DD3Proj_branches_ker(float* proj, const float* VNNR03ASDQ234RA___, float x0, float y0, float z0, const float* xds, const float* yds, const float* bxds, const float* byds, const float* bzds, float3* cossinZT, float objCntIdxX, float objCntIdxY, float objCntIdxZ, const int CMNOI4BH3958BTUJ23, const int QAIWLRUW4W, const int VMN0348HGRO2, const int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, const int ASD23MZNCDS23890I, const int ZXMCKLNAESD2038EIWONVF, float dx, float dz) { const int detIdV = threadIdx.x + blockIdx.x * blockDim.x; const int detIdU = threadIdx.y + blockIdx.y * blockDim.y; const int angIdx = threadIdx.z + blockIdx.z * blockDim.z; if (detIdU < CVNKLJFVN2390XZNKLS0RE34QRGFEDFS && detIdV < ASD23MZNCDS23890I && angIdx < ZXMCKLNAESD2038EIWONVF) { float cosT = cossinZT[angIdx].x; float sinT = cossinZT[angIdx].y; float zPos = cossinZT[angIdx].z; float3 cursour = make_float3(x0 * cosT - y0 * sinT, x0 * sinT + y0 * cosT, z0 + 
zPos); float summ = 0; float detPosLX = bxds[detIdU]; float detPosRX = bxds[detIdU + 1]; float detPosLY = byds[detIdU]; float detPosRY = byds[detIdU + 1]; float detPosDZ = bzds[detIdV]; float detPosUZ = detPosDZ + dz; float curDirLX = detPosLX * cosT - detPosLY * sinT - cursour.x; float curDirLY = detPosLX * sinT + detPosLY * cosT - cursour.y; float curDirRX = detPosRX * cosT - detPosRY * sinT - cursour.x; float curDirRY = detPosRX * sinT + detPosRY * cosT - cursour.y; float curDirDZ = detPosDZ + zPos - cursour.z; float curDirUZ = detPosUZ + zPos - cursour.z; float dirX = xds[detIdU] * cosT - yds[detIdU] * sinT - cursour.x; float dirY = xds[detIdU] * sinT + yds[detIdU] * cosT - cursour.y; float dirZ = detPosDZ - 0.5 * dz - z0; if (fabsf(dirX) < fabsf(dirY)) { detPosLX = -cursour.y * curDirLX / curDirLY + cursour.x; detPosRX = -cursour.y * curDirRX / curDirRY + cursour.x; detPosDZ = -cursour.y * curDirDZ / dirY + cursour.z; detPosUZ = -cursour.y * curDirUZ / dirY + cursour.z; dirZ = dx / fabsf(dirY / sqrtf(dirY * dirY + dirX * dirX + dirZ * dirZ) * (detPosLX - detPosRX) * (detPosUZ - detPosDZ)); if (detPosLX > detPosRX) { zPos = detPosLX; detPosLX = detPosRX; detPosRX = zPos; } for (int jj = 0; jj < QAIWLRUW4W; jj++) { y0 = (jj - objCntIdxY) * dx - cursour.y; zPos = y0 / curDirLY; cosT = zPos * curDirLX + cursour.x; sinT = zPos * curDirRX + cursour.x; x0 = zPos * curDirDZ + cursour.z; z0 = zPos * curDirUZ + cursour.z; if (cosT > sinT) { zPos = cosT; cosT = sinT; sinT = zPos; } int minXIdx = floor(cosT / dx + objCntIdxX) - 1; int maxXIdx = ceil(sinT / dx + objCntIdxX) + 1; int minZIdx = floor(x0 / dz + objCntIdxZ) - 1; int maxZIdx = ceil(z0 / dz + objCntIdxZ) + 1; if (maxXIdx < 0) { continue; } if (minXIdx > CMNOI4BH3958BTUJ23) { continue; } if (maxZIdx < 0) { continue; } if (minZIdx > VMN0348HGRO2) { continue; } if (minXIdx < 0) { minXIdx = 0; } if (maxXIdx > CMNOI4BH3958BTUJ23) { maxXIdx = CMNOI4BH3958BTUJ23; } if (minZIdx < 0) { minZIdx = 0; } if (maxZIdx > 
VMN0348HGRO2) { maxZIdx = VMN0348HGRO2; } cosT = (cursour.x - (minXIdx - objCntIdxX) * dx) * cursour.y / y0 + cursour.x; for (int ii = minXIdx; ii < maxXIdx; ii++) { sinT = cosT - dx * cursour.y / y0; dirX = intersectLength_device<float>(detPosLX, detPosRX, cosT, sinT); x0 = (cursour.z - (minZIdx - objCntIdxZ) * dz) * cursour.y / y0 + cursour.z; for (int kk = minZIdx; kk < maxZIdx; kk++) { z0 = x0 - dz * cursour.y / y0; dirY = intersectLength_device<float>(detPosDZ, detPosUZ, x0, z0); summ += VNNR03ASDQ234RA___[(jj * CMNOI4BH3958BTUJ23 + ii) * VMN0348HGRO2 + kk] * (dirX * dirY); x0 = z0; } cosT = sinT; } } proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * dirZ; } else { detPosLY = -cursour.x * curDirLY / curDirLX + cursour.y; detPosRY = -cursour.x * curDirRY / curDirRX + cursour.y; detPosDZ = -cursour.x * curDirDZ / dirX + cursour.z; detPosUZ = -cursour.x * curDirUZ / dirX + cursour.z; dirZ = dx / fabsf(dirX / sqrtf(dirY * dirY + dirX * dirX + dirZ * dirZ) * (detPosLY - detPosRY) * (detPosUZ - detPosDZ)); if (detPosLY > detPosRY) { zPos = detPosLY; detPosLY = detPosRY; detPosRY = zPos; } for (int ii = 0; ii < CMNOI4BH3958BTUJ23; ii++) { x0 = (ii - objCntIdxX) * dx - cursour.x; zPos = x0 / curDirLX; cosT = zPos * curDirLY + cursour.y; sinT = zPos * curDirRY + cursour.y; y0 = zPos * curDirDZ + cursour.z; z0 = zPos * curDirUZ + cursour.z; if (cosT > sinT){ zPos = cosT; cosT = sinT; sinT = zPos; } int minYIdx = floor(cosT / dx + objCntIdxY) - 1; int maxYIdx = ceil(sinT / dx + objCntIdxY) + 1; int minZIdx = floor(y0 / dz + objCntIdxZ) - 1; int maxZIdx = ceil(z0 / dz + objCntIdxZ) + 1; if (maxYIdx < 0) { continue; } if (minYIdx > CMNOI4BH3958BTUJ23) { continue; } if (maxZIdx < 0) { continue; } if (minZIdx > VMN0348HGRO2) { continue; } if (minYIdx < 0) { minYIdx = 0; } if (maxYIdx > CMNOI4BH3958BTUJ23) { maxYIdx = QAIWLRUW4W; } if (minZIdx < 0) { minZIdx = 0; } if (maxZIdx > VMN0348HGRO2) { maxZIdx = VMN0348HGRO2; } cosT = 
(cursour.y - (minYIdx - objCntIdxY) * dx) * cursour.x / x0 + cursour.y; for (int jj = minYIdx; jj < maxYIdx; jj++) { sinT = cosT - dx * cursour.x / x0; dirX = intersectLength_device<float>(detPosLY, detPosRY, cosT, sinT); y0 = (cursour.z - (minZIdx - objCntIdxZ) * dz) * cursour.x / x0 + cursour.z; for (int kk = minZIdx; kk < maxZIdx; kk++) { z0 = y0 - dz * cursour.x / x0; dirY = intersectLength_device<float>(detPosDZ, detPosUZ, y0, z0); summ += VNNR03ASDQ234RA___[(jj * CMNOI4BH3958BTUJ23 + ii) * VMN0348HGRO2 + kk] * (dirX * dirY); y0 = z0; } cosT = sinT; } } proj[(angIdx * CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + detIdU) * ASD23MZNCDS23890I + detIdV] = summ * dirZ; } } } void DD3Proj_branches(float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* hVNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum) { cudaSetDevice(gpunum); for (int jj = 0; jj != QAIWLRUW4W; ++jj) { for (int ii = 0; ii != CMNOI4BH3958BTUJ23; ++ii) { byte tempV = mask[jj * CMNOI4BH3958BTUJ23 + ii]; for (int kk = 0; kk != VMN0348HGRO2; ++kk) { hVNNR03ASDQ234RA___[(jj * CMNOI4BH3958BTUJ23 + ii) * VMN0348HGRO2 + kk] *= static_cast<float>(tempV); } } } thrust::host_vector<float3> hcossinZT(ZXMCKLNAESD2038EIWONVF); for (int i = 0; i != ZXMCKLNAESD2038EIWONVF; ++i) { hcossinZT[i].x = cosf(hangs[i]); hcossinZT[i].y = sinf(hangs[i]); hcossinZT[i].z = hzPos[i]; } thrust::device_vector<float3> cossinZT = hcossinZT; thrust::host_vector<float> hxds(xds, xds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::host_vector<float> hyds(yds, yds + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS); thrust::host_vector<float> hzds(zds, zds + ASD23MZNCDS23890I); thrust::host_vector<float> bxds(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, 0); thrust::host_vector<float> 
byds(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, 0); thrust::host_vector<float> bzds(ASD23MZNCDS23890I + 1, 0); DD3Boundaries<float>(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, hxds, bxds); DD3Boundaries<float>(CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + 1, hyds, byds); DD3Boundaries<float>(ASD23MZNCDS23890I + 1, hzds, bzds); thrust::device_vector<float> dxds = hxds; thrust::device_vector<float> dyds = hyds; thrust::device_vector<float> dzds = hzds; thrust::device_vector<float> dbxds = bxds; thrust::device_vector<float> dbyds = byds; thrust::device_vector<float> dbzds = bzds; thrust::device_vector<float> d_____XCV(h_____XCV, h_____XCV + CVNKLJFVN2390XZNKLS0RE34QRGFEDFS*ASD23MZNCDS23890I*ZXMCKLNAESD2038EIWONVF); thrust::device_vector<float> dVNNR03ASDQ234RA___(hVNNR03ASDQ234RA___, hVNNR03ASDQ234RA___ + CMNOI4BH3958BTUJ23*QAIWLRUW4W*VMN0348HGRO2); dim3 KLCDNFKVG038Q4OHINGF34(64, 4, 1); dim3 KSLJNV830Q49EZSDKLNP2Q3( (ASD23MZNCDS23890I + KLCDNFKVG038Q4OHINGF34.x - 1) / KLCDNFKVG038Q4OHINGF34.x, (CVNKLJFVN2390XZNKLS0RE34QRGFEDFS + KLCDNFKVG038Q4OHINGF34.y - 1) / KLCDNFKVG038Q4OHINGF34.y, (ZXMCKLNAESD2038EIWONVF + KLCDNFKVG038Q4OHINGF34.z - 1) / KLCDNFKVG038Q4OHINGF34.z); float objCntIdxX = (static_cast<float>(CMNOI4BH3958BTUJ23) - 1.0f) * 0.5f - imgXCenter / dx; float objCntIdxY = (static_cast<float>(QAIWLRUW4W) - 1.0f) * 0.5f - imgYCenter / dx; float objCntIdxZ = (static_cast<float>(VMN0348HGRO2) - 1.0f) * 0.5f - imgZCenter / dz; DD3Proj_branches_ker << <KSLJNV830Q49EZSDKLNP2Q3, KLCDNFKVG038Q4OHINGF34 >> >( thrust::raw_pointer_cast(&d_____XCV[0]), thrust::raw_pointer_cast(&dVNNR03ASDQ234RA___[0]), x0, y0, z0, thrust::raw_pointer_cast(&dxds[0]), thrust::raw_pointer_cast(&dyds[0]), thrust::raw_pointer_cast(&dbxds[0]), thrust::raw_pointer_cast(&dbyds[0]), thrust::raw_pointer_cast(&dbzds[0]), thrust::raw_pointer_cast(&cossinZT[0]), objCntIdxX, objCntIdxY, objCntIdxZ, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, ZXMCKLNAESD2038EIWONVF, dx, 
dz); thrust::copy(d_____XCV.begin(), d_____XCV.end(), h_____XCV); hcossinZT.clear(); cossinZT.clear(); hxds.clear(); hyds.clear(); hzds.clear(); bxds.clear(); byds.clear(); bzds.clear(); dxds.clear(); dyds.clear(); dzds.clear(); dbxds.clear(); dbyds.clear(); dbzds.clear(); d_____XCV.clear(); dVNNR03ASDQ234RA___.clear(); } void DD3Proj_gpu( float x0, float y0, float z0, int CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, int ASD23MZNCDS23890I, float* xds, float* yds, float* zds, float imgXCenter, float imgYCenter, float imgZCenter, float* hangs, float* hzPos, int ZXMCKLNAESD2038EIWONVF, int CMNOI4BH3958BTUJ23, int QAIWLRUW4W, int VMN0348HGRO2, float* hVNNR03ASDQ234RA___, float* h_____XCV, float dx, float dz, byte* mask, int gpunum, int _____XCVMode) { switch(_____XCVMode) { case 0: DD3_gpu_proj_branchless_sat2d(x0, y0, z0,CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds,yds, zds, imgXCenter,imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; case 1: ______XCKMVN840WNJK9Q02HJIFKS(x0, y0, z0,CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds,yds, zds, imgXCenter,imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; case 2: DD3_gpu_proj_doubleprecisionbranchless(x0, y0, z0,CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds,yds, zds, imgXCenter,imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; case 3: break; case 4: DD3Proj_branches(x0, y0, z0, CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds, yds, zds, imgXCenter, imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; default: DD3_gpu_proj_branchless_sat2d(x0, y0, 
z0,CVNKLJFVN2390XZNKLS0RE34QRGFEDFS, ASD23MZNCDS23890I, xds,yds, zds, imgXCenter,imgYCenter, imgZCenter, hangs, hzPos, ZXMCKLNAESD2038EIWONVF, CMNOI4BH3958BTUJ23, QAIWLRUW4W, VMN0348HGRO2, hVNNR03ASDQ234RA___, h_____XCV, dx, dz, mask, 0); break; } }
6866346a6d181c513c1b3be85f336f35cb51bf6d.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020, the YACCLAB contributors, as // shown by the AUTHORS file. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include <opencv2/cudafeatures2d.hpp> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } __device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Init phase. // Labels start at value 1. __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzb block_conn, cuda::PtrStepSzi block_labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = 2 * row * img.step + 2 * col; unsigned conn_index = row * (block_conn.step / block_conn.elem_size) + col; unsigned labels_index = row * (block_labels.step / block_labels.elem_size) + col; if (row < block_conn.rows && col < block_conn.cols) { unsigned P0 = 0x777; unsigned P = 0; if (img[img_index]) { P |= P0; } if (2 * col + 1 < img.cols) { if (img[img_index + 1]) { P |= (P0 << 1); } if (2 * row + 1 < img.rows && img[img_index + img.step + 1]) { P |= (P0 << 5); } } if (2 * row + 1 < img.rows) { if (img[img_index + img.step]) { P |= (P0 << 4); } } if (col == 0) { P &= 0xEEEE; } if (2 * col + 1 >= img.cols) { P &= 0x3333; } else if (2 * col + 2 >= img.cols) { P &= 0x7777; } if (row == 0) { P &= 0xFFF0; } if (2 * row + 1 >= img.rows) { P &= 0xFF; } else if (2 * row + 2 >= img.rows) { P &= 0xFFF; } // P is now ready to be used to find neighbour 
blocks (or it should be) // P value avoids range errors unsigned char conn_bitmask = 0; if (P > 0) { block_labels[labels_index] = labels_index + 1; if (HasBit(P, 0) && img[img_index - img.step - 1]) { SetBit(conn_bitmask, 0); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { SetBit(conn_bitmask, 1); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { SetBit(conn_bitmask, 2); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 8) && img[img_index + img.step - 1])) { SetBit(conn_bitmask, 3); } if ((HasBit(P, 7) && img[img_index + 2]) || (HasBit(P, 11) && img[img_index + img.step + 2])) { SetBit(conn_bitmask, 4); } if (HasBit(P, 12) && img[img_index + 2 * img.step - 1]) { SetBit(conn_bitmask, 5); } if ((HasBit(P, 13) && img[img_index + 2 * img.step]) || (HasBit(P, 14) && img[img_index + 2 * img.step + 1])) { SetBit(conn_bitmask, 6); } if (HasBit(P, 15) && img[img_index + 2 * img.step + 2]) { SetBit(conn_bitmask, 7); } } else { block_labels[labels_index] = 0; } block_conn[conn_index] = conn_bitmask; } } __global__ void ExpandConnections(const cuda::PtrStepSzb connections, cuda::PtrStepSzb expansion) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned conn_index = row * (connections.step / connections.elem_size) + col; unsigned exp_index = 3 * row * (expansion.step / expansion.elem_size) + 3 * col; if (row < connections.rows && col < connections.cols) { expansion[exp_index + (expansion.step / expansion.elem_size) + 1] = 2; unsigned char neighbours = connections[conn_index]; if (HasBit(neighbours, 0)) { expansion[exp_index] = 1; } else { expansion[exp_index] = 0; } if (HasBit(neighbours, 1)) { expansion[exp_index + 1] = 1; } else { expansion[exp_index + 1] = 0; } if (HasBit(neighbours, 2)) { expansion[exp_index + 2] = 1; } else { expansion[exp_index + 2] = 0; } if (HasBit(neighbours, 3)) { expansion[exp_index + (expansion.step / 
expansion.elem_size)] = 1; } else { expansion[exp_index + (expansion.step / expansion.elem_size)] = 0; } if (HasBit(neighbours, 4)) { expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 1; } else { expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 0; } if (HasBit(neighbours, 5)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 0; } if (HasBit(neighbours, 6)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 0; } if (HasBit(neighbours, 7)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 0; } } } __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned char neighbours, unsigned label, unsigned labels_index) { unsigned int min = label; if (HasBit(neighbours, 0)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]); } if (HasBit(neighbours, 1)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 2)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]); } if (HasBit(neighbours, 3)) { min = MinLabel(min, labels.data[labels_index - 1]); } if (HasBit(neighbours, 4)) { min = MinLabel(min, labels.data[labels_index + 1]); } if (HasBit(neighbours, 5)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]); } if (HasBit(neighbours, 6)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 7)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]); } return min; 
} // Scan phase. // The pixel associated with current thread is given the minimum label of the neighbours. __global__ void Scan(cuda::PtrStepSzi labels, cuda::PtrStepSzb connections, char *changes) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned connections_index = row * (connections.step / connections.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned char neighbours = connections[connections_index]; unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, neighbours, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } // Analysis phase. // The pixel associated with current thread is given the minimum label of the neighbours. __global__ void Analyze(cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { // Performances are the same as the paper variant unsigned index = labels_index; while (label - 1 != index) { index = label - 1; label = labels[index]; } labels[labels_index] = label; } } } // Final Labeling phase // Assigns every pixel of 2x2 blocks the block label __global__ void FinalLabeling(cuda::PtrStepSzi block_labels, cuda::PtrStepSzi labels, const cuda::PtrStepSzb img) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned blocks_index = row * (block_labels.step / block_labels.elem_size) + col; unsigned labels_index = 2 * row * (labels.step / labels.elem_size) + 2 * col; unsigned img_index = 2 * row * (img.step / img.elem_size) + 2 * col; if (row < 
block_labels.rows && col < block_labels.cols) { unsigned int label = block_labels[blocks_index]; if (img[img_index]) labels[labels_index] = label; else { labels[labels_index] = 0; } if (2 * col + 1 < labels.cols) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (2 * row + 1 < labels.rows) { if (img[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (2 * row + 1 < labels.rows) { if (img[img_index + img.step]) labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class BE : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; char changes; char *d_changes; cuda::GpuMat d_connections_; cuda::GpuMat d_block_labels_; public: BE() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); // Extra structures that I would gladly do without d_connections_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_8UC1); d_block_labels_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_32SC1); hipMalloc(&d_changes, sizeof(char)); grid_size_ = dim3((d_connections_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_connections_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_connections_, d_block_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes = 0; hipMemcpy(d_changes, &changes, sizeof(char), 
hipMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> >(d_block_labels_, d_img_labels_, d_img_); //d_img_labels_.download(img_labels_); hipDeviceSynchronize(); hipFree(d_changes); d_connections_.release(); d_block_labels_.release(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); d_connections_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_8UC1); d_block_labels_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_32SC1); hipMalloc(&d_changes, sizeof(char)); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); hipFree(d_changes); d_connections_.release(); d_block_labels_.release(); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_connections_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_connections_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_connections_, d_block_labels_); // La Init esplode // Controlla che cosa contiene connections //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //assert(hipDeviceSynchronize() == hipSuccess); //Immagine di debug della inizializzazione //Mat1i init_labels; 
//d_block_labels_.download(init_labels); while (true) { changes = 0; hipMemcpy(d_changes, &changes, sizeof(char), hipMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); hipMemcpy(&changes, d_changes, sizeof(char), hipMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } // Immagine di debug delle label dei blocchi //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> >(d_block_labels_, d_img_labels_, d_img_); hipDeviceSynchronize(); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BE);
6866346a6d181c513c1b3be85f336f35cb51bf6d.cu
// Copyright (c) 2020, the YACCLAB contributors, as // shown by the AUTHORS file. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include <opencv2/cudafeatures2d.hpp> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "labeling_algorithms.h" #include "register.h" #define BLOCK_ROWS 16 #define BLOCK_COLS 16 using namespace cv; // Algorithm itself has good performances, but memory allocation is a problem. namespace { // Only use it with unsigned numeric types template <typename T> __device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) { return (bitmap >> pos) & 1; } __device__ __forceinline__ void SetBit(unsigned char &bitmap, unsigned char pos) { bitmap |= (1 << pos); } // Init phase. // Labels start at value 1. __global__ void Init(const cuda::PtrStepSzb img, cuda::PtrStepSzb block_conn, cuda::PtrStepSzi block_labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned img_index = 2 * row * img.step + 2 * col; unsigned conn_index = row * (block_conn.step / block_conn.elem_size) + col; unsigned labels_index = row * (block_labels.step / block_labels.elem_size) + col; if (row < block_conn.rows && col < block_conn.cols) { unsigned P0 = 0x777; unsigned P = 0; if (img[img_index]) { P |= P0; } if (2 * col + 1 < img.cols) { if (img[img_index + 1]) { P |= (P0 << 1); } if (2 * row + 1 < img.rows && img[img_index + img.step + 1]) { P |= (P0 << 5); } } if (2 * row + 1 < img.rows) { if (img[img_index + img.step]) { P |= (P0 << 4); } } if (col == 0) { P &= 0xEEEE; } if (2 * col + 1 >= img.cols) { P &= 0x3333; } else if (2 * col + 2 >= img.cols) { P &= 0x7777; } if (row == 0) { P &= 0xFFF0; } if (2 * row + 1 >= img.rows) { P &= 0xFF; } else if (2 * row + 2 >= img.rows) { P &= 0xFFF; } // P is now ready to be used to find neighbour blocks (or it should be) // P value avoids range errors 
unsigned char conn_bitmask = 0; if (P > 0) { block_labels[labels_index] = labels_index + 1; if (HasBit(P, 0) && img[img_index - img.step - 1]) { SetBit(conn_bitmask, 0); } if ((HasBit(P, 1) && img[img_index - img.step]) || (HasBit(P, 2) && img[img_index + 1 - img.step])) { SetBit(conn_bitmask, 1); } if (HasBit(P, 3) && img[img_index + 2 - img.step]) { SetBit(conn_bitmask, 2); } if ((HasBit(P, 4) && img[img_index - 1]) || (HasBit(P, 8) && img[img_index + img.step - 1])) { SetBit(conn_bitmask, 3); } if ((HasBit(P, 7) && img[img_index + 2]) || (HasBit(P, 11) && img[img_index + img.step + 2])) { SetBit(conn_bitmask, 4); } if (HasBit(P, 12) && img[img_index + 2 * img.step - 1]) { SetBit(conn_bitmask, 5); } if ((HasBit(P, 13) && img[img_index + 2 * img.step]) || (HasBit(P, 14) && img[img_index + 2 * img.step + 1])) { SetBit(conn_bitmask, 6); } if (HasBit(P, 15) && img[img_index + 2 * img.step + 2]) { SetBit(conn_bitmask, 7); } } else { block_labels[labels_index] = 0; } block_conn[conn_index] = conn_bitmask; } } __global__ void ExpandConnections(const cuda::PtrStepSzb connections, cuda::PtrStepSzb expansion) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned conn_index = row * (connections.step / connections.elem_size) + col; unsigned exp_index = 3 * row * (expansion.step / expansion.elem_size) + 3 * col; if (row < connections.rows && col < connections.cols) { expansion[exp_index + (expansion.step / expansion.elem_size) + 1] = 2; unsigned char neighbours = connections[conn_index]; if (HasBit(neighbours, 0)) { expansion[exp_index] = 1; } else { expansion[exp_index] = 0; } if (HasBit(neighbours, 1)) { expansion[exp_index + 1] = 1; } else { expansion[exp_index + 1] = 0; } if (HasBit(neighbours, 2)) { expansion[exp_index + 2] = 1; } else { expansion[exp_index + 2] = 0; } if (HasBit(neighbours, 3)) { expansion[exp_index + (expansion.step / expansion.elem_size)] = 1; } else { expansion[exp_index + 
(expansion.step / expansion.elem_size)] = 0; } if (HasBit(neighbours, 4)) { expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 1; } else { expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 0; } if (HasBit(neighbours, 5)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 0; } if (HasBit(neighbours, 6)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 0; } if (HasBit(neighbours, 7)) { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 1; } else { expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 0; } } } __device__ unsigned int MinLabel(unsigned l1, unsigned l2) { if (l1 && l2) return min(l1, l2); else return l1; } __device__ unsigned int FindMinLabel(cuda::PtrStepSzi labels, unsigned char neighbours, unsigned label, unsigned labels_index) { unsigned int min = label; if (HasBit(neighbours, 0)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) - 1]); } if (HasBit(neighbours, 1)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 2)) { min = MinLabel(min, labels.data[labels_index - (labels.step / labels.elem_size) + 1]); } if (HasBit(neighbours, 3)) { min = MinLabel(min, labels.data[labels_index - 1]); } if (HasBit(neighbours, 4)) { min = MinLabel(min, labels.data[labels_index + 1]); } if (HasBit(neighbours, 5)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) - 1]); } if (HasBit(neighbours, 6)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size)]); } if (HasBit(neighbours, 7)) { min = MinLabel(min, labels.data[labels_index + (labels.step / labels.elem_size) + 1]); } return min; } // Scan phase. 
// The pixel associated with current thread is given the minimum label of the neighbours. __global__ void Scan(cuda::PtrStepSzi labels, cuda::PtrStepSzb connections, char *changes) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; unsigned connections_index = row * (connections.step / connections.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned char neighbours = connections[connections_index]; unsigned label = labels[labels_index]; if (label) { unsigned min_label = FindMinLabel(labels, neighbours, label, labels_index); if (min_label < label) { labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label); *changes = 1; } } } } // Analysis phase. // The pixel associated with current thread is given the minimum label of the neighbours. __global__ void Analyze(cuda::PtrStepSzi labels) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned labels_index = row * (labels.step / labels.elem_size) + col; if (row < labels.rows && col < labels.cols) { unsigned label = labels[labels_index]; if (label) { // Performances are the same as the paper variant unsigned index = labels_index; while (label - 1 != index) { index = label - 1; label = labels[index]; } labels[labels_index] = label; } } } // Final Labeling phase // Assigns every pixel of 2x2 blocks the block label __global__ void FinalLabeling(cuda::PtrStepSzi block_labels, cuda::PtrStepSzi labels, const cuda::PtrStepSzb img) { unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y; unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x; unsigned blocks_index = row * (block_labels.step / block_labels.elem_size) + col; unsigned labels_index = 2 * row * (labels.step / labels.elem_size) + 2 * col; unsigned img_index = 2 * row * (img.step / img.elem_size) + 2 * col; if (row < block_labels.rows && col 
< block_labels.cols) { unsigned int label = block_labels[blocks_index]; if (img[img_index]) labels[labels_index] = label; else { labels[labels_index] = 0; } if (2 * col + 1 < labels.cols) { if (img[img_index + 1]) labels[labels_index + 1] = label; else { labels[labels_index + 1] = 0; } if (2 * row + 1 < labels.rows) { if (img[img_index + img.step + 1]) labels[labels_index + (labels.step / labels.elem_size) + 1] = label; else { labels[labels_index + (labels.step / labels.elem_size) + 1] = 0; } } } if (2 * row + 1 < labels.rows) { if (img[img_index + img.step]) labels[labels_index + (labels.step / labels.elem_size)] = label; else { labels[labels_index + (labels.step / labels.elem_size)] = 0; } } } } } class BE : public GpuLabeling2D<Connectivity2D::CONN_8> { private: dim3 grid_size_; dim3 block_size_; char changes; char *d_changes; cuda::GpuMat d_connections_; cuda::GpuMat d_block_labels_; public: BE() {} void PerformLabeling() { d_img_labels_.create(d_img_.size(), CV_32SC1); // Extra structures that I would gladly do without d_connections_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_8UC1); d_block_labels_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_32SC1); cudaMalloc(&d_changes, sizeof(char)); grid_size_ = dim3((d_connections_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_connections_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_connections_, d_block_labels_); //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //Mat1i init_labels; //d_block_labels_.download(init_labels); while (true) { changes = 0; cudaMemcpy(d_changes, &changes, sizeof(char), 
cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> >(d_block_labels_, d_img_labels_, d_img_); //d_img_labels_.download(img_labels_); cudaDeviceSynchronize(); cudaFree(d_changes); d_connections_.release(); d_block_labels_.release(); } private: double Alloc() { perf_.start(); d_img_labels_.create(d_img_.size(), CV_32SC1); d_connections_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_8UC1); d_block_labels_.create((d_img_.rows + 1) / 2, (d_img_.cols + 1) / 2, CV_32SC1); cudaMalloc(&d_changes, sizeof(char)); perf_.stop(); return perf_.last(); } double Dealloc() { perf_.start(); cudaFree(d_changes); d_connections_.release(); d_block_labels_.release(); perf_.stop(); return perf_.last(); } double MemoryTransferHostToDevice() { perf_.start(); d_img_.upload(img_); perf_.stop(); return perf_.last(); } void MemoryTransferDeviceToHost() { d_img_labels_.download(img_labels_); } void AllScans() { grid_size_ = dim3((d_connections_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_connections_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1); block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1); Init << <grid_size_, block_size_ >> >(d_img_, d_connections_, d_block_labels_); // La Init esplode // Controlla che cosa contiene connections //cuda::GpuMat d_expanded_connections; //d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1); //ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections); //Mat1b expanded_connections; //d_expanded_connections.download(expanded_connections); //d_expanded_connections.release(); //assert(cudaDeviceSynchronize() == cudaSuccess); //Immagine di debug della inizializzazione //Mat1i 
init_labels; //d_block_labels_.download(init_labels); while (true) { changes = 0; cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice); Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes); cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost); if (!changes) break; Analyze << <grid_size_, block_size_ >> > (d_block_labels_); } // Immagine di debug delle label dei blocchi //Mat1i block_labels; //d_block_labels_.download(block_labels); FinalLabeling << <grid_size_, block_size_ >> >(d_block_labels_, d_img_labels_, d_img_); cudaDeviceSynchronize(); } public: void PerformLabelingWithSteps() { double alloc_timing = Alloc(); perf_.start(); AllScans(); perf_.stop(); perf_.store(Step(StepType::ALL_SCANS), perf_.last()); double dealloc_timing = Dealloc(); perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing); } }; REGISTER_LABELING(BE);
8d9b150ce5aa20064ee9055ba80e7b190a00ad63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define TOTAL_THREADS 1024 #define THREADS_PER_BLOCK 256 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) __global__ void gather_points_kernel(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { // points: (B, C, N) // idx: (B, M) // output: // out: (B, C, M) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; points += bs_idx * c * n + c_idx * n; out[0] = points[idx[0]]; } void gather_points_kernel_launcher(int b, int c, int n, int npoints, const float *points, const int *idx, float *out, hipStream_t stream) { // points: (B, C, N) // idx: (B, npoints) // output: // out: (B, C, npoints) hipError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( gather_points_kernel), dim3(blocks), dim3(threads), 0, stream, b, c, n, npoints, points, idx, out); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void gather_points_grad_kernel(int b, int c, int n, int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { // grad_out: (B, C, M) // idx: (B, M) // output: // grad_points: (B, C, N) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; grad_out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; grad_points += bs_idx * c * n + c_idx * n; atomicAdd(grad_points + idx[0], grad_out[0]); } void gather_points_grad_kernel_launcher(int b, int c, int n, int npoints, 
const float *grad_out, const int *idx, float *grad_points, hipStream_t stream) { // grad_out: (B, C, npoints) // idx: (B, npoints) // output: // grad_points: (B, C, N) hipError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( gather_points_grad_kernel), dim3(blocks), dim3(threads), 0, stream, b, c, n, npoints, grad_out, idx, grad_points); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } }
8d9b150ce5aa20064ee9055ba80e7b190a00ad63.cu
#include <stdio.h> #include <stdlib.h> #define TOTAL_THREADS 1024 #define THREADS_PER_BLOCK 256 #define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) __global__ void gather_points_kernel(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { // points: (B, C, N) // idx: (B, M) // output: // out: (B, C, M) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; points += bs_idx * c * n + c_idx * n; out[0] = points[idx[0]]; } void gather_points_kernel_launcher(int b, int c, int n, int npoints, const float *points, const int *idx, float *out, cudaStream_t stream) { // points: (B, C, N) // idx: (B, npoints) // output: // out: (B, C, npoints) cudaError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); gather_points_kernel<<<blocks, threads, 0, stream>>>(b, c, n, npoints, points, idx, out); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void gather_points_grad_kernel(int b, int c, int n, int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { // grad_out: (B, C, M) // idx: (B, M) // output: // grad_points: (B, C, N) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; grad_out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; grad_points += bs_idx * c * n + c_idx * n; atomicAdd(grad_points + idx[0], grad_out[0]); } void gather_points_grad_kernel_launcher(int b, int c, int n, int npoints, const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream) { // grad_out: (B, C, npoints) // 
idx: (B, npoints) // output: // grad_points: (B, C, N) cudaError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); gather_points_grad_kernel<<<blocks, threads, 0, stream>>>( b, c, n, npoints, grad_out, idx, grad_points); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
99c015868015a40cd0e916a775b5473a07c10683.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { } __global__ void vsign(const int n, const double *a, double *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<n) { if (a[i]<0) {b[i]=-1.0;} else {if (a[i]>0) {b[i]=1.0;} else {b[i]=0.0;} } } }
99c015868015a40cd0e916a775b5473a07c10683.cu
#include "includes.h" extern "C" { } __global__ void vsign(const int n, const double *a, double *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<n) { if (a[i]<0) {b[i]=-1.0;} else {if (a[i]>0) {b[i]=1.0;} else {b[i]=0.0;} } } }
47b4c9ed6355a6dcd149b6e85beda1ce3eff2a39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CollectiveModeGPU.cuh" #include "hoomd/VectorMath.h" #include "hoomd/HOOMDMath.h" #include <stdio.h> #include "hoomd/Saru.h" using namespace hoomd; #include <assert.h> // definitions to call the correct cublas functions depending on specified precision #ifdef SINGLE_PRECISION #define SINCOS(...) sincosf(__VA_ARGS__) #else #define SINCOS(...) sincos(__VA_ARGS__) #endif #define BLOCK_SIZE 256 #define WARP_SIZE 32 #define FULL_MASK 0xffffffff extern "C" __global__ void print_matrix(Scalar* d_mat, int rows, int cols, bool row_major) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (row_major) printf("%f ", d_mat[cols*i + j]); else printf("%f ", d_mat[j*rows + i]); } printf("\n"); } } extern "C" __device__ void print_device_matrix(Scalar* d_mat, int rows, int cols, bool row_major) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (row_major) printf("%f ", d_mat[cols*i + j]); else printf("%f ", d_mat[j*rows + i]); } printf("\n"); } } /*! \file CollectiveModeGPU.cu \brief Defines GPU kernel code for Collective Mode Brownian Dynamics. 
*/ // ======================================== KERNELS ========================================= /* adds a column-major displacement to the Scalar 4 array of positions d_net_force HOOMD array of forces for each particle d_B_T_F_cos and d_B_T_F_sin row-major Nk x 3 matrices containing the cosine and sine transforms of particle positions times the forces d_A_mat a column-major 3x3 matrix containing the self part of the mobility d_A_half_mat a column-major 3xe matrix containing the Cholesky decomposition of matrix A, the self-part d_dft_cos and d_dft_sin row-major N x Nk matrices containing the cosine and sine transforms of particle positions d_pos HOOMD array of positions d_index_array HOOMD array of index mapping for particles (matters when integrating a subset of particles) d_tag HOOMD array of particle tags necessary for generation of independent random numbers for different groups d_image HOOMD array containing the index of the image in which each particle resides box HOOMD object that contains data on the box size alpha factor that describes how much to excite collective modes alpha = 0 corresponds to typical Brownian dynamics N number of particles Nk number of excited wave vectors timestep the current timestep seed a random seed MUST BE DIFFERENT than the seed for the self part above T temperature dt time step */ extern "C" __global__ void add_to_pos(const Scalar4* d_net_force, const Scalar* d_B_T_F_cos, const Scalar* d_B_T_F_sin, const Scalar* d_A_mat, const Scalar* d_A_half_mat, const Scalar* d_dft_cos, const Scalar* d_dft_sin, Scalar4* d_pos, const unsigned int* d_index_array, const unsigned int* d_tag, int3* d_image, const BoxDim box, const Scalar alpha, const unsigned int N, const unsigned int Nk, const unsigned int timestep, const unsigned int seed, const Scalar T, const Scalar dt) { extern __shared__ Scalar buf[]; Scalar* A = buf; Scalar* A_half = &buf[9]; Scalar* C_T = &buf[18]; Scalar* S_T = &buf[18 + 3*Nk]; int idx = threadIdx.x; // copy A and B_T_F 
matrices to shared memory while (idx < max(3*Nk, 9)) { if (idx < 9) { A[idx] = d_A_mat[idx]; A_half[idx] = d_A_half_mat[idx]; } C_T[idx] = d_B_T_F_cos[idx]; S_T[idx] = d_B_T_F_sin[idx]; idx += BLOCK_SIZE; } __syncthreads(); idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { unsigned int p_idx = d_index_array[idx]; Scalar4 p = d_pos[p_idx]; Scalar4 F = d_net_force[p_idx]; int3 image = d_image[p_idx]; unsigned int ptag = d_tag[p_idx]; // --------------- self part ------------------- // dt * F*A p.x += dt*(F.x*A[0] + F.y*A[1] + F.z*A[2]); p.y += dt*(F.x*A[3] + F.y*A[4] + F.z*A[5]); p.z += dt*(F.x*A[6] + F.y*A[7] + F.z*A[8]); // Brownian part detail::Saru saru(ptag, timestep, seed); Scalar sx = saru.s<Scalar>(-1,1); Scalar sy = saru.s<Scalar>(-1,1); Scalar sz = saru.s<Scalar>(-1,1); Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*T*dt); // sqrt(3) because not Gaussian p.x += coeff*(sx*A_half[0]); // A_half is upper triangular p.y += coeff*(sx*A_half[3] + sy*A_half[4]); p.z += coeff*(sx*A_half[6] + sy*A_half[7] + sz*A_half[8]); // ------------ collective part ---------------- Scalar c, s; for (int i = 0; i < Nk; i++) { c = d_dft_cos[Nk*idx + i]; s = d_dft_sin[Nk*idx + i]; p.x += c * C_T[3*i]; p.x += s * S_T[3*i]; p.y += c * C_T[3*i + 1]; p.y += s * S_T[3*i + 1]; p.z += c * C_T[3*i + 2]; p.z += s * S_T[3*i + 2]; } // wrap particles in case displacements pushed particles outside box box.wrap(p, image); // write out data d_pos[p_idx] = p; d_image[p_idx] = image; } } /* Calculates the nonuniform Fourier transform for given k vectors d_pos HOOMD array of positions d_index_array HOOMD array of index mapping for particles (matters when integrating a subset of particles) d_net_force HOOMD array of forces for each particle d_B_T_F_cos and d_B_T_F_sin row-major Nk x 3 matrices containing the cosine and sine transforms of particle positions d_ks_mat row-major Nk x 3 matrix containing the excited wave vectors box HOOMD object that contains data on the box size N number 
of particles D number of dimensions Nk number of excited wave vectors dt time step T temperature alpha factor that describes how much to excite collective modes alpha = 0 corresponds to typical Brownian dynamics timestep the current timestep wave_seed a random seed MUST BE DIFFERENT than the seed for the self part above */ extern "C" __global__ void calculate_dft_and_reduce(const Scalar4* d_pos, const unsigned int* d_index_array, const Scalar4* d_net_force, Scalar* d_B_T_F_cos, Scalar* d_B_T_F_sin, Scalar* d_dft_cos, Scalar* d_dft_sin, const Scalar* d_ks_mat, const unsigned int N, const unsigned int D, const unsigned int Nk, const Scalar dt, const Scalar T, const Scalar alpha, const int timestep, const unsigned int wave_seed) { extern __shared__ Scalar ks[]; // copy d_ks_mat (row-major) to shared memory int idx = threadIdx.x; while (idx < Nk) { ks[3*idx] = d_ks_mat[3*idx]; ks[3*idx + 1] = d_ks_mat[3*idx + 1]; if (D == 3) ks[3*idx + 2] = d_ks_mat[3*idx + 2]; else ks[3*idx + 2] = 0.0; idx += BLOCK_SIZE; } // __syncthreads(); // don't need this because of ballot_sync below // variables for discrete fourier transform idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar4 p, F; unsigned int p_idx; // calculate reduction mask unsigned mask = __ballot_sync(FULL_MASK, idx < N); if (idx >= N) return; p_idx = d_index_array[idx]; p = d_pos[p_idx]; F = d_net_force[p_idx]; if (D == 2) { p.z = 0.0; F.z = 0.0; } // calculate cos and sin transforms and do reduction Scalar dft_cos; Scalar dft_sin; Scalar cf_x, cf_y, cf_z, sf_x, sf_y, sf_z; detail::Saru saru(idx, timestep, wave_seed); Scalar dt_a_Nk = dt*alpha/Nk; for (int kidx = 0; kidx < Nk; kidx++) { // stagger the k vector each block is operating on int i = (blockIdx.x + kidx) % Nk; // argument is 2 * pi * dot(k, x/L) Scalar arg = ks[3*i]*p.x + ks[3*i+1]*p.y + ks[3*i+2]*p.z; SINCOS(arg, &dft_sin, &dft_cos); // save values to d_dft arrays if (idx < N) { d_dft_cos[Nk*idx + i] = dft_cos; d_dft_sin[Nk*idx + i] = dft_sin; } // 
calculate action on forces cf_x = dt_a_Nk * dft_cos * F.x; cf_y = dt_a_Nk * dft_cos * F.y; cf_z = dt_a_Nk * dft_cos * F.z; sf_x = dt_a_Nk * dft_sin * F.x; sf_y = dt_a_Nk * dft_sin * F.y; sf_z = dt_a_Nk * dft_sin * F.z; // add random wave-space displacement if (idx < 6) { Scalar rand_disp = fast::sqrt(Scalar(3.0)*Scalar(2.0)*T*dt*alpha/Nk) * saru.s<Scalar>(-1,1); switch(idx) { case 0: cf_x += rand_disp; break; case 1: cf_y += rand_disp; break; case 2: cf_z += rand_disp; break; case 3: sf_x -= rand_disp; break; case 4: sf_y -= rand_disp; break; case 5: sf_z -= rand_disp; break; } } // warp reduce for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) { cf_x += __shfl_down_sync(mask, cf_x, offset); cf_y += __shfl_down_sync(mask, cf_y, offset); cf_z += __shfl_down_sync(mask, cf_z, offset); sf_x += __shfl_down_sync(mask, sf_x, offset); sf_y += __shfl_down_sync(mask, sf_y, offset); sf_z += __shfl_down_sync(mask, sf_z, offset); } // device-wide atomic reduction if ((threadIdx.x & (WARP_SIZE - 1)) == 0) { atomicAdd(&d_B_T_F_cos[3*i], cf_x); atomicAdd(&d_B_T_F_cos[3*i+1], cf_y); atomicAdd(&d_B_T_F_cos[3*i+2], cf_z); atomicAdd(&d_B_T_F_sin[3*i], sf_x); atomicAdd(&d_B_T_F_sin[3*i+1], sf_y); atomicAdd(&d_B_T_F_sin[3*i+2], sf_z); } } } /* Takes the cos and sin transform matrices and performs the orthogonal wavespace projection (i.e., multiplication by (I-kk) for each k) d_B_T_F_cos and d_B_T_F_sin both row-major Nk x 3 matrices each row corresponds to a different wave vector d_ks_norm_mat a row-major Nk x 3 matrix each row contains a normalized wave vector Nk the number of wave vectors Assumes kernel will be launched with 1 block consisting of # threads = Nk */ extern "C" __global__ void project_ks(Scalar* d_B_T_F_cos, Scalar* d_B_T_F_sin, const Scalar* d_ks_norm_mat, const unsigned int Nk) { int idx = threadIdx.x; if (idx < Nk) { Scalar kx = d_ks_norm_mat[3*idx]; Scalar ky = d_ks_norm_mat[3*idx + 1]; Scalar kz = d_ks_norm_mat[3*idx + 2]; Scalar cx = d_B_T_F_cos[3*idx]; 
Scalar cy = d_B_T_F_cos[3*idx + 1]; Scalar cz = d_B_T_F_cos[3*idx + 2]; Scalar sx = d_B_T_F_sin[3*idx]; Scalar sy = d_B_T_F_sin[3*idx + 1]; Scalar sz = d_B_T_F_sin[3*idx + 2]; // do dot product Scalar dotc = kx*cx + ky*cy + kz*cz; Scalar dots = kx*sx + ky*sy + kz*sz; // I - kk d_B_T_F_cos[3*idx] -= dotc * kx; d_B_T_F_cos[3*idx + 1] -= dotc * ky; d_B_T_F_cos[3*idx + 2] -= dotc * kz; d_B_T_F_sin[3*idx] -= dots * kx; d_B_T_F_sin[3*idx + 1] -= dots * ky; d_B_T_F_sin[3*idx + 2] -= dots * kz; } } /* zeros out arrays */ extern "C" __global__ void zero_matrices(Scalar* d_B_T_F_cos, Scalar* d_B_T_F_sin, const unsigned int Nk) { int idx = threadIdx.x; if (idx < 3*Nk) { d_B_T_F_cos[idx] = 0.0; d_B_T_F_sin[idx] = 0.0; } } // ============================== MAIN FXN ============================================= hipError_t gpu_collective(unsigned int timestep, unsigned int seed, unsigned int wave_seed, Scalar4* d_pos, int3* d_image, const BoxDim &box, const unsigned int* d_index_array, const unsigned int* d_tag, const Scalar alpha, const unsigned int N, const unsigned int Nk, const Scalar4* d_net_force, const Scalar dt, const unsigned int D, const Scalar T, Scalar* d_dft_cos, Scalar* d_dft_sin, Scalar* d_B_T_F_cos, Scalar* d_B_T_F_sin, const Scalar* d_ks_mat, const Scalar* d_ks_norm_mat, const Scalar* d_A_mat, const Scalar* d_A_half_mat) { // zero out reduction matrices // hipMemset(d_B_T_F_cos, 0, Nk*3*sizeof(Scalar)); // hipMemset(d_B_T_F_sin, 0, Nk*3*sizeof(Scalar)); hipLaunchKernelGGL(( zero_matrices), dim3(1), dim3(3*Nk) , 0, 0, d_B_T_F_cos, d_B_T_F_sin, Nk); hipLaunchKernelGGL(( calculate_dft_and_reduce), dim3(N/BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 3*Nk*sizeof(Scalar) , 0, d_pos, d_index_array, d_net_force, d_B_T_F_cos, d_B_T_F_sin, d_dft_cos, d_dft_sin, d_ks_mat, N, D, Nk, dt, T, alpha, timestep, wave_seed); hipLaunchKernelGGL(( project_ks), dim3(1), dim3(Nk) , 0, 0, d_B_T_F_cos, d_B_T_F_sin, d_ks_norm_mat, Nk); hipLaunchKernelGGL(( add_to_pos), dim3(N/BLOCK_SIZE + 1), 
dim3(BLOCK_SIZE), (18 + 6*Nk)*sizeof(Scalar) , 0, d_net_force, d_B_T_F_cos, d_B_T_F_sin, d_A_mat, d_A_half_mat, d_dft_cos, d_dft_sin, d_pos, d_index_array, d_tag, d_image, box, alpha, N, Nk, timestep, seed, T, dt); return hipSuccess; }
47b4c9ed6355a6dcd149b6e85beda1ce3eff2a39.cu
#include "CollectiveModeGPU.cuh" #include "hoomd/VectorMath.h" #include "hoomd/HOOMDMath.h" #include <stdio.h> #include "hoomd/Saru.h" using namespace hoomd; #include <assert.h> // definitions to call the correct cublas functions depending on specified precision #ifdef SINGLE_PRECISION #define SINCOS(...) sincosf(__VA_ARGS__) #else #define SINCOS(...) sincos(__VA_ARGS__) #endif #define BLOCK_SIZE 256 #define WARP_SIZE 32 #define FULL_MASK 0xffffffff extern "C" __global__ void print_matrix(Scalar* d_mat, int rows, int cols, bool row_major) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (row_major) printf("%f ", d_mat[cols*i + j]); else printf("%f ", d_mat[j*rows + i]); } printf("\n"); } } extern "C" __device__ void print_device_matrix(Scalar* d_mat, int rows, int cols, bool row_major) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (row_major) printf("%f ", d_mat[cols*i + j]); else printf("%f ", d_mat[j*rows + i]); } printf("\n"); } } /*! \file CollectiveModeGPU.cu \brief Defines GPU kernel code for Collective Mode Brownian Dynamics. 
*/ // ======================================== KERNELS ========================================= /* adds a column-major displacement to the Scalar 4 array of positions d_net_force HOOMD array of forces for each particle d_B_T_F_cos and d_B_T_F_sin row-major Nk x 3 matrices containing the cosine and sine transforms of particle positions times the forces d_A_mat a column-major 3x3 matrix containing the self part of the mobility d_A_half_mat a column-major 3xe matrix containing the Cholesky decomposition of matrix A, the self-part d_dft_cos and d_dft_sin row-major N x Nk matrices containing the cosine and sine transforms of particle positions d_pos HOOMD array of positions d_index_array HOOMD array of index mapping for particles (matters when integrating a subset of particles) d_tag HOOMD array of particle tags necessary for generation of independent random numbers for different groups d_image HOOMD array containing the index of the image in which each particle resides box HOOMD object that contains data on the box size alpha factor that describes how much to excite collective modes alpha = 0 corresponds to typical Brownian dynamics N number of particles Nk number of excited wave vectors timestep the current timestep seed a random seed MUST BE DIFFERENT than the seed for the self part above T temperature dt time step */ extern "C" __global__ void add_to_pos(const Scalar4* d_net_force, const Scalar* d_B_T_F_cos, const Scalar* d_B_T_F_sin, const Scalar* d_A_mat, const Scalar* d_A_half_mat, const Scalar* d_dft_cos, const Scalar* d_dft_sin, Scalar4* d_pos, const unsigned int* d_index_array, const unsigned int* d_tag, int3* d_image, const BoxDim box, const Scalar alpha, const unsigned int N, const unsigned int Nk, const unsigned int timestep, const unsigned int seed, const Scalar T, const Scalar dt) { extern __shared__ Scalar buf[]; Scalar* A = buf; Scalar* A_half = &buf[9]; Scalar* C_T = &buf[18]; Scalar* S_T = &buf[18 + 3*Nk]; int idx = threadIdx.x; // copy A and B_T_F 
matrices to shared memory while (idx < max(3*Nk, 9)) { if (idx < 9) { A[idx] = d_A_mat[idx]; A_half[idx] = d_A_half_mat[idx]; } C_T[idx] = d_B_T_F_cos[idx]; S_T[idx] = d_B_T_F_sin[idx]; idx += BLOCK_SIZE; } __syncthreads(); idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { unsigned int p_idx = d_index_array[idx]; Scalar4 p = d_pos[p_idx]; Scalar4 F = d_net_force[p_idx]; int3 image = d_image[p_idx]; unsigned int ptag = d_tag[p_idx]; // --------------- self part ------------------- // dt * F*A p.x += dt*(F.x*A[0] + F.y*A[1] + F.z*A[2]); p.y += dt*(F.x*A[3] + F.y*A[4] + F.z*A[5]); p.z += dt*(F.x*A[6] + F.y*A[7] + F.z*A[8]); // Brownian part detail::Saru saru(ptag, timestep, seed); Scalar sx = saru.s<Scalar>(-1,1); Scalar sy = saru.s<Scalar>(-1,1); Scalar sz = saru.s<Scalar>(-1,1); Scalar coeff = fast::sqrt(Scalar(3.0)*Scalar(2.0)*T*dt); // sqrt(3) because not Gaussian p.x += coeff*(sx*A_half[0]); // A_half is upper triangular p.y += coeff*(sx*A_half[3] + sy*A_half[4]); p.z += coeff*(sx*A_half[6] + sy*A_half[7] + sz*A_half[8]); // ------------ collective part ---------------- Scalar c, s; for (int i = 0; i < Nk; i++) { c = d_dft_cos[Nk*idx + i]; s = d_dft_sin[Nk*idx + i]; p.x += c * C_T[3*i]; p.x += s * S_T[3*i]; p.y += c * C_T[3*i + 1]; p.y += s * S_T[3*i + 1]; p.z += c * C_T[3*i + 2]; p.z += s * S_T[3*i + 2]; } // wrap particles in case displacements pushed particles outside box box.wrap(p, image); // write out data d_pos[p_idx] = p; d_image[p_idx] = image; } } /* Calculates the nonuniform Fourier transform for given k vectors d_pos HOOMD array of positions d_index_array HOOMD array of index mapping for particles (matters when integrating a subset of particles) d_net_force HOOMD array of forces for each particle d_B_T_F_cos and d_B_T_F_sin row-major Nk x 3 matrices containing the cosine and sine transforms of particle positions d_ks_mat row-major Nk x 3 matrix containing the excited wave vectors box HOOMD object that contains data on the box size N number 
of particles D number of dimensions Nk number of excited wave vectors dt time step T temperature alpha factor that describes how much to excite collective modes alpha = 0 corresponds to typical Brownian dynamics timestep the current timestep wave_seed a random seed MUST BE DIFFERENT than the seed for the self part above */ extern "C" __global__ void calculate_dft_and_reduce(const Scalar4* d_pos, const unsigned int* d_index_array, const Scalar4* d_net_force, Scalar* d_B_T_F_cos, Scalar* d_B_T_F_sin, Scalar* d_dft_cos, Scalar* d_dft_sin, const Scalar* d_ks_mat, const unsigned int N, const unsigned int D, const unsigned int Nk, const Scalar dt, const Scalar T, const Scalar alpha, const int timestep, const unsigned int wave_seed) { extern __shared__ Scalar ks[]; // copy d_ks_mat (row-major) to shared memory int idx = threadIdx.x; while (idx < Nk) { ks[3*idx] = d_ks_mat[3*idx]; ks[3*idx + 1] = d_ks_mat[3*idx + 1]; if (D == 3) ks[3*idx + 2] = d_ks_mat[3*idx + 2]; else ks[3*idx + 2] = 0.0; idx += BLOCK_SIZE; } // __syncthreads(); // don't need this because of ballot_sync below // variables for discrete fourier transform idx = blockIdx.x * blockDim.x + threadIdx.x; Scalar4 p, F; unsigned int p_idx; // calculate reduction mask unsigned mask = __ballot_sync(FULL_MASK, idx < N); if (idx >= N) return; p_idx = d_index_array[idx]; p = d_pos[p_idx]; F = d_net_force[p_idx]; if (D == 2) { p.z = 0.0; F.z = 0.0; } // calculate cos and sin transforms and do reduction Scalar dft_cos; Scalar dft_sin; Scalar cf_x, cf_y, cf_z, sf_x, sf_y, sf_z; detail::Saru saru(idx, timestep, wave_seed); Scalar dt_a_Nk = dt*alpha/Nk; for (int kidx = 0; kidx < Nk; kidx++) { // stagger the k vector each block is operating on int i = (blockIdx.x + kidx) % Nk; // argument is 2 * pi * dot(k, x/L) Scalar arg = ks[3*i]*p.x + ks[3*i+1]*p.y + ks[3*i+2]*p.z; SINCOS(arg, &dft_sin, &dft_cos); // save values to d_dft arrays if (idx < N) { d_dft_cos[Nk*idx + i] = dft_cos; d_dft_sin[Nk*idx + i] = dft_sin; } // 
calculate action on forces cf_x = dt_a_Nk * dft_cos * F.x; cf_y = dt_a_Nk * dft_cos * F.y; cf_z = dt_a_Nk * dft_cos * F.z; sf_x = dt_a_Nk * dft_sin * F.x; sf_y = dt_a_Nk * dft_sin * F.y; sf_z = dt_a_Nk * dft_sin * F.z; // add random wave-space displacement if (idx < 6) { Scalar rand_disp = fast::sqrt(Scalar(3.0)*Scalar(2.0)*T*dt*alpha/Nk) * saru.s<Scalar>(-1,1); switch(idx) { case 0: cf_x += rand_disp; break; case 1: cf_y += rand_disp; break; case 2: cf_z += rand_disp; break; case 3: sf_x -= rand_disp; break; case 4: sf_y -= rand_disp; break; case 5: sf_z -= rand_disp; break; } } // warp reduce for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) { cf_x += __shfl_down_sync(mask, cf_x, offset); cf_y += __shfl_down_sync(mask, cf_y, offset); cf_z += __shfl_down_sync(mask, cf_z, offset); sf_x += __shfl_down_sync(mask, sf_x, offset); sf_y += __shfl_down_sync(mask, sf_y, offset); sf_z += __shfl_down_sync(mask, sf_z, offset); } // device-wide atomic reduction if ((threadIdx.x & (WARP_SIZE - 1)) == 0) { atomicAdd(&d_B_T_F_cos[3*i], cf_x); atomicAdd(&d_B_T_F_cos[3*i+1], cf_y); atomicAdd(&d_B_T_F_cos[3*i+2], cf_z); atomicAdd(&d_B_T_F_sin[3*i], sf_x); atomicAdd(&d_B_T_F_sin[3*i+1], sf_y); atomicAdd(&d_B_T_F_sin[3*i+2], sf_z); } } } /* Takes the cos and sin transform matrices and performs the orthogonal wavespace projection (i.e., multiplication by (I-kk) for each k) d_B_T_F_cos and d_B_T_F_sin both row-major Nk x 3 matrices each row corresponds to a different wave vector d_ks_norm_mat a row-major Nk x 3 matrix each row contains a normalized wave vector Nk the number of wave vectors Assumes kernel will be launched with 1 block consisting of # threads = Nk */ extern "C" __global__ void project_ks(Scalar* d_B_T_F_cos, Scalar* d_B_T_F_sin, const Scalar* d_ks_norm_mat, const unsigned int Nk) { int idx = threadIdx.x; if (idx < Nk) { Scalar kx = d_ks_norm_mat[3*idx]; Scalar ky = d_ks_norm_mat[3*idx + 1]; Scalar kz = d_ks_norm_mat[3*idx + 2]; Scalar cx = d_B_T_F_cos[3*idx]; 
Scalar cy = d_B_T_F_cos[3*idx + 1]; Scalar cz = d_B_T_F_cos[3*idx + 2]; Scalar sx = d_B_T_F_sin[3*idx]; Scalar sy = d_B_T_F_sin[3*idx + 1]; Scalar sz = d_B_T_F_sin[3*idx + 2]; // do dot product Scalar dotc = kx*cx + ky*cy + kz*cz; Scalar dots = kx*sx + ky*sy + kz*sz; // I - kk d_B_T_F_cos[3*idx] -= dotc * kx; d_B_T_F_cos[3*idx + 1] -= dotc * ky; d_B_T_F_cos[3*idx + 2] -= dotc * kz; d_B_T_F_sin[3*idx] -= dots * kx; d_B_T_F_sin[3*idx + 1] -= dots * ky; d_B_T_F_sin[3*idx + 2] -= dots * kz; } } /* zeros out arrays */ extern "C" __global__ void zero_matrices(Scalar* d_B_T_F_cos, Scalar* d_B_T_F_sin, const unsigned int Nk) { int idx = threadIdx.x; if (idx < 3*Nk) { d_B_T_F_cos[idx] = 0.0; d_B_T_F_sin[idx] = 0.0; } } // ============================== MAIN FXN ============================================= cudaError_t gpu_collective(unsigned int timestep, unsigned int seed, unsigned int wave_seed, Scalar4* d_pos, int3* d_image, const BoxDim &box, const unsigned int* d_index_array, const unsigned int* d_tag, const Scalar alpha, const unsigned int N, const unsigned int Nk, const Scalar4* d_net_force, const Scalar dt, const unsigned int D, const Scalar T, Scalar* d_dft_cos, Scalar* d_dft_sin, Scalar* d_B_T_F_cos, Scalar* d_B_T_F_sin, const Scalar* d_ks_mat, const Scalar* d_ks_norm_mat, const Scalar* d_A_mat, const Scalar* d_A_half_mat) { // zero out reduction matrices // cudaMemset(d_B_T_F_cos, 0, Nk*3*sizeof(Scalar)); // cudaMemset(d_B_T_F_sin, 0, Nk*3*sizeof(Scalar)); zero_matrices<<< 1, 3*Nk >>>(d_B_T_F_cos, d_B_T_F_sin, Nk); calculate_dft_and_reduce<<< N/BLOCK_SIZE + 1, BLOCK_SIZE, 3*Nk*sizeof(Scalar) >>>(d_pos, d_index_array, d_net_force, d_B_T_F_cos, d_B_T_F_sin, d_dft_cos, d_dft_sin, d_ks_mat, N, D, Nk, dt, T, alpha, timestep, wave_seed); project_ks<<< 1, Nk >>>(d_B_T_F_cos, d_B_T_F_sin, d_ks_norm_mat, Nk); add_to_pos<<< N/BLOCK_SIZE + 1, BLOCK_SIZE, (18 + 6*Nk)*sizeof(Scalar) >>>(d_net_force, d_B_T_F_cos, d_B_T_F_sin, d_A_mat, d_A_half_mat, d_dft_cos, d_dft_sin, d_pos, 
d_index_array, d_tag, d_image, box, alpha, N, Nk, timestep, seed, T, dt); return cudaSuccess; }
d5a1a3516656fc416f0a2c699343a0cf708ed33b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <opencv2/core/cuda.hpp> #include <datatypes.hpp> using cv::cuda::GpuMat; __global__ void kernel_compute_vertex_map(const cv::cuda::PtrStepSz<float> depth_map, cv::cuda::PtrStepSz<float3> vertex_map, const CameraIntrinsics camera_params, const float max_depth){ // Calculate global row and column for each thread const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; if (col >= depth_map.cols || row >= depth_map.rows) return; float depth_val = depth_map(row, col); // Don't use depth values larger than max_depth if (depth_val > max_depth){ depth_val = 0.f; } // from screen to camera space vertex_map(row, col) = make_float3((col - camera_params.cx) * depth_val / camera_params.fx, (row - camera_params.cy) * depth_val / camera_params.fy, depth_val); } __global__ void kernel_compute_normal_map(cv::cuda::PtrStepSz<float3> vertex_map, cv::cuda::PtrStepSz<float3> normal_map){ // Calculate global row and column for each thread const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; if (col >= vertex_map.cols - 1 || row >= vertex_map.rows - 1){ if (col == vertex_map.cols - 1 || row == vertex_map.rows - 1){ normal_map(row, col) = make_float3(0.f,0.f,0.f); // TODO: maybe compute them with vertex_map(row - 1, col) etc. 
} return; } float sx = vertex_map(row + 1, col).x - vertex_map(row, col).x; float sy = vertex_map(row + 1, col).y - vertex_map(row, col).y; float sz = vertex_map(row + 1, col).z - vertex_map(row, col).z; float tx = vertex_map(row, col + 1).x - vertex_map(row, col).x; float ty = vertex_map(row, col + 1).y - vertex_map(row, col).y; float tz = vertex_map(row, col + 1).z - vertex_map(row, col).z; float3 cross_prod = make_float3(sy * tz - sz * ty, sz * tx - sx * tz, sx * ty - sy * tx); float norm = sqrt(cross_prod.x * cross_prod.x + cross_prod.y * cross_prod.y + cross_prod.z * cross_prod.z) + .000001f; normal_map(row, col) = make_float3(cross_prod.x / norm, cross_prod.y / norm, cross_prod.z / norm); } void compute_vertex_map(const GpuMat& filtered_depth_map, GpuMat& vertex_map, const CameraIntrinsics camera_params, const float max_depth){ int threads = 32; dim3 T(threads, threads, 1); // number of threads per block (depends on compute capability of your GPU) int blocks_x = (filtered_depth_map.cols + T.x - 1) / T.x; int blocks_y = (filtered_depth_map.rows + T.y - 1) / T.y; dim3 M(blocks_x, blocks_y, 1); // number of thread blocks (depends on compute capability of your GPU) hipLaunchKernelGGL(( kernel_compute_vertex_map), dim3(M) , dim3(T) , 0, 0, filtered_depth_map, vertex_map, camera_params, max_depth); hipDeviceSynchronize(); } void compute_normal_map(const GpuMat& vertex_map, GpuMat& normal_map){ int threads = 32; dim3 T(threads, threads, 1); // number of threads per block (depends on compute capability of your GPU) int blocks_x = (vertex_map.cols + T.x - 1) / T.x; int blocks_y = (vertex_map.rows + T.y - 1) / T.y; dim3 M(blocks_x, blocks_y, 1); // number of thread blocks (depends on compute capability of your GPU) hipLaunchKernelGGL(( kernel_compute_normal_map), dim3(M) , dim3(T) , 0, 0, vertex_map, normal_map); hipDeviceSynchronize(); }
d5a1a3516656fc416f0a2c699343a0cf708ed33b.cu
#include <opencv2/core/cuda.hpp> #include <datatypes.hpp> using cv::cuda::GpuMat; __global__ void kernel_compute_vertex_map(const cv::cuda::PtrStepSz<float> depth_map, cv::cuda::PtrStepSz<float3> vertex_map, const CameraIntrinsics camera_params, const float max_depth){ // Calculate global row and column for each thread const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; if (col >= depth_map.cols || row >= depth_map.rows) return; float depth_val = depth_map(row, col); // Don't use depth values larger than max_depth if (depth_val > max_depth){ depth_val = 0.f; } // from screen to camera space vertex_map(row, col) = make_float3((col - camera_params.cx) * depth_val / camera_params.fx, (row - camera_params.cy) * depth_val / camera_params.fy, depth_val); } __global__ void kernel_compute_normal_map(cv::cuda::PtrStepSz<float3> vertex_map, cv::cuda::PtrStepSz<float3> normal_map){ // Calculate global row and column for each thread const int col = blockIdx.x * blockDim.x + threadIdx.x; const int row = blockIdx.y * blockDim.y + threadIdx.y; if (col >= vertex_map.cols - 1 || row >= vertex_map.rows - 1){ if (col == vertex_map.cols - 1 || row == vertex_map.rows - 1){ normal_map(row, col) = make_float3(0.f,0.f,0.f); // TODO: maybe compute them with vertex_map(row - 1, col) etc. 
} return; } float sx = vertex_map(row + 1, col).x - vertex_map(row, col).x; float sy = vertex_map(row + 1, col).y - vertex_map(row, col).y; float sz = vertex_map(row + 1, col).z - vertex_map(row, col).z; float tx = vertex_map(row, col + 1).x - vertex_map(row, col).x; float ty = vertex_map(row, col + 1).y - vertex_map(row, col).y; float tz = vertex_map(row, col + 1).z - vertex_map(row, col).z; float3 cross_prod = make_float3(sy * tz - sz * ty, sz * tx - sx * tz, sx * ty - sy * tx); float norm = sqrt(cross_prod.x * cross_prod.x + cross_prod.y * cross_prod.y + cross_prod.z * cross_prod.z) + .000001f; normal_map(row, col) = make_float3(cross_prod.x / norm, cross_prod.y / norm, cross_prod.z / norm); } void compute_vertex_map(const GpuMat& filtered_depth_map, GpuMat& vertex_map, const CameraIntrinsics camera_params, const float max_depth){ int threads = 32; dim3 T(threads, threads, 1); // number of threads per block (depends on compute capability of your GPU) int blocks_x = (filtered_depth_map.cols + T.x - 1) / T.x; int blocks_y = (filtered_depth_map.rows + T.y - 1) / T.y; dim3 M(blocks_x, blocks_y, 1); // number of thread blocks (depends on compute capability of your GPU) kernel_compute_vertex_map<<< M , T >>>(filtered_depth_map, vertex_map, camera_params, max_depth); cudaDeviceSynchronize(); } void compute_normal_map(const GpuMat& vertex_map, GpuMat& normal_map){ int threads = 32; dim3 T(threads, threads, 1); // number of threads per block (depends on compute capability of your GPU) int blocks_x = (vertex_map.cols + T.x - 1) / T.x; int blocks_y = (vertex_map.rows + T.y - 1) / T.y; dim3 M(blocks_x, blocks_y, 1); // number of thread blocks (depends on compute capability of your GPU) kernel_compute_normal_map<<< M , T >>>(vertex_map, normal_map); cudaDeviceSynchronize(); }
96a71d5e32ea2eb1b7bff510dd0b99c0aa4d293a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void reduction(int * in, int * out){ int globalid = blockIdx.x*blockDim.x + threadIdx.x; __shared__ int s_array[BLOCK_DIM]; s_array[threadIdx.x] = in[globalid]; __syncthreads(); for (int i = blockDim.x / 2; i > 0; i /= 2){ if (threadIdx.x < i){ s_array[threadIdx.x] += s_array[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) out[blockIdx.x] = s_array[0]; }
96a71d5e32ea2eb1b7bff510dd0b99c0aa4d293a.cu
#include "includes.h" __global__ void reduction(int * in, int * out){ int globalid = blockIdx.x*blockDim.x + threadIdx.x; __shared__ int s_array[BLOCK_DIM]; s_array[threadIdx.x] = in[globalid]; __syncthreads(); for (int i = blockDim.x / 2; i > 0; i /= 2){ if (threadIdx.x < i){ s_array[threadIdx.x] += s_array[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) out[blockIdx.x] = s_array[0]; }
3582d4cd73e2aac617822b58636aa53e73ec4ea1.hip
// !!! This is a file automatically generated by hipify!!! #include <allocator.h> #include <cycles/cgcycle.h> #include <cusp/multiply.h> #include <cusp/blas.h> template <class Matrix, class Vector> CG_Flex_Cycle<Matrix, Vector>::CG_Flex_Cycle(CycleType next_cycle, int num_iters, AMG_Level<Matrix_h, Vector_h> *next, const Matrix_hyb_d_CG &Aell, const Vector_d_CG &b, Vector_d_CG &x, CGType tol, int maxiters, bool verbose) { typedef typename Matrix::value_type ValueType; typedef typename Matrix::index_type IndexType; typedef typename Matrix::memory_space MemorySpace; int N = b.size(); ValueType bnorm = cusp::blas::nrm2(b); Vector_d_CG y(N); Vector_d_CG z(N); Vector_d_CG r(N); Vector_d_CG d(N); Vector_d_CG p(N); cusp::multiply(Aell, x, y); cusp::blas::axpby(b, y, r, ValueType(1), ValueType(-1)); next->cycle_level0(next_cycle, r, z); cusp::blas::copy(z, p); ValueType rzold = cusp::blas::dotc(r, z); ValueType rznew; int niter = 0; double iter_start, iter_stop; iter_start = CLOCK(); while(niter < maxiters) { cusp::multiply(Aell, p, y); ValueType yp = cusp::blas::dotc(y, p); ValueType alpha = rzold / yp; cusp::blas::axpy(p, x, alpha); cusp::blas::axpy(y, r, -alpha); ValueType normr = cusp::blas::nrm2(r); if (verbose) std::cout << "normr=" << std::scientific << normr << " niter=" << niter << std::endl; if( (normr / bnorm) <= tol) break; niter++; next->cycle_level0(next_cycle, r, z, verbose); rznew = cusp::blas::dotc(z, r); ValueType beta = rznew / rzold; cusp::blas::axpby(z, p, p, ValueType(1), beta); rzold = rznew; } hipDeviceSynchronize(); iter_stop = CLOCK(); if (verbose) { std::cout << "average time per iteration: " << (iter_stop-iter_start) / niter << std::endl; std::cout << "total solve time: " << (iter_stop-iter_start) << std::endl; } y.clear(); z.clear(); r.clear(); d.clear(); p.clear(); } /**************************************** * Explict instantiations ***************************************/ template class CG_Flex_Cycle<Matrix_h_CG, Vector_h_CG>;
3582d4cd73e2aac617822b58636aa53e73ec4ea1.cu
#include <allocator.h> #include <cycles/cgcycle.h> #include <cusp/multiply.h> #include <cusp/blas.h> template <class Matrix, class Vector> CG_Flex_Cycle<Matrix, Vector>::CG_Flex_Cycle(CycleType next_cycle, int num_iters, AMG_Level<Matrix_h, Vector_h> *next, const Matrix_hyb_d_CG &Aell, const Vector_d_CG &b, Vector_d_CG &x, CGType tol, int maxiters, bool verbose) { typedef typename Matrix::value_type ValueType; typedef typename Matrix::index_type IndexType; typedef typename Matrix::memory_space MemorySpace; int N = b.size(); ValueType bnorm = cusp::blas::nrm2(b); Vector_d_CG y(N); Vector_d_CG z(N); Vector_d_CG r(N); Vector_d_CG d(N); Vector_d_CG p(N); cusp::multiply(Aell, x, y); cusp::blas::axpby(b, y, r, ValueType(1), ValueType(-1)); next->cycle_level0(next_cycle, r, z); cusp::blas::copy(z, p); ValueType rzold = cusp::blas::dotc(r, z); ValueType rznew; int niter = 0; double iter_start, iter_stop; iter_start = CLOCK(); while(niter < maxiters) { cusp::multiply(Aell, p, y); ValueType yp = cusp::blas::dotc(y, p); ValueType alpha = rzold / yp; cusp::blas::axpy(p, x, alpha); cusp::blas::axpy(y, r, -alpha); ValueType normr = cusp::blas::nrm2(r); if (verbose) std::cout << "normr=" << std::scientific << normr << " niter=" << niter << std::endl; if( (normr / bnorm) <= tol) break; niter++; next->cycle_level0(next_cycle, r, z, verbose); rznew = cusp::blas::dotc(z, r); ValueType beta = rznew / rzold; cusp::blas::axpby(z, p, p, ValueType(1), beta); rzold = rznew; } cudaThreadSynchronize(); iter_stop = CLOCK(); if (verbose) { std::cout << "average time per iteration: " << (iter_stop-iter_start) / niter << std::endl; std::cout << "total solve time: " << (iter_stop-iter_start) << std::endl; } y.clear(); z.clear(); r.clear(); d.clear(); p.clear(); } /**************************************** * Explict instantiations ***************************************/ template class CG_Flex_Cycle<Matrix_h_CG, Vector_h_CG>;
50e70aabf746ca88dcae0f37cafe67d80b09af80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include "crop_cuda.h" // //#include <stdio.h> //#include <cstdlib> //#include <math.h> //#include <iostream> // //#include "../common/macro.h" // //#define PIXEL_PER_THREAD 128 // //namespace va_cv { // //texture<unsigned char, 2> tex_src; //__constant__ int rect[5]; // // //__global__ void kernel_crop_grey(unsigned char *dst ) { // // map from threadIdx/BlockIdx to pixel position(on dst) // int tid = threadIdx.x + blockIdx.x * blockDim.x; // while (tid < rect[2] * rect[3]) { // int dst_x = tid % rect[2]; // int dst_y = tid / rect[2]; // dst[tid] = tex2D(tex_src, dst_x + rect[0], dst_y + rect[1]); // // tid += blockDim.x * gridDim.x; // } //} // //void CropCuda::crop_cuda_grey_int8(const unsigned char *src, int src_width, int src_height, // unsigned char *dst, // int crop_left, int crop_top, int crop_width, int crop_height) { // // crop rect, use const value // int *rect_vec = new int[5]{crop_left, crop_top, crop_width, crop_height, src_width}; // hipMemcpyToSymbol( rect, rect_vec, sizeof(int) * 5); // // // int dst_size = crop_width * crop_height; // int src_size = src_width * src_height; // // dstcuda malloc // unsigned char *dev_src, *dev_dst; // hipMalloc( (void**)&dev_dst, dst_size * sizeof(unsigned char) ) ; // hipMalloc( (void**)&dev_src, src_size * sizeof(unsigned char) ) ; // hipMemcpy( dev_src, src, src_size * sizeof(unsigned char), hipMemcpyHostToDevice ); // // // src // hipChannelFormatDesc desc = hipCreateChannelDesc<unsigned char>(); // int err = hipBindTexture2D( NULL, tex_src, dev_src, desc, src_width, src_height, // sizeof(unsigned char) * src_width ); // if (err != hipSuccess) { // printf("bind failed!!! 
%d\n", err); // } // // // // dim3 blocks((dst_size + PIXEL_PER_THREAD - 1) / PIXEL_PER_THREAD); // dim3 threads(PIXEL_PER_THREAD); // kernel_crop_grey<<<blocks,threads>>>( dev_dst ); // // // dst // hipMemcpy(dst, dev_dst, dst_size * sizeof(unsigned char), hipMemcpyDeviceToHost); // // // // hipFree(dev_dst); // hipFree(dev_src); // hipUnbindTexture( tex_src ); // // delete[] rect_vec; //} // //}
50e70aabf746ca88dcae0f37cafe67d80b09af80.cu
//#include "crop_cuda.h" // //#include <stdio.h> //#include <cstdlib> //#include <math.h> //#include <iostream> // //#include "../common/macro.h" // //#define PIXEL_PER_THREAD 128 // //namespace va_cv { // //texture<unsigned char, 2> tex_src; //__constant__ int rect[5]; // // //__global__ void kernel_crop_grey(unsigned char *dst ) { // // map from threadIdx/BlockIdx to pixel position(on dst) // int tid = threadIdx.x + blockIdx.x * blockDim.x; // while (tid < rect[2] * rect[3]) { // int dst_x = tid % rect[2]; // int dst_y = tid / rect[2]; // dst[tid] = tex2D(tex_src, dst_x + rect[0], dst_y + rect[1]); // // tid += blockDim.x * gridDim.x; // } //} // //void CropCuda::crop_cuda_grey_int8(const unsigned char *src, int src_width, int src_height, // unsigned char *dst, // int crop_left, int crop_top, int crop_width, int crop_height) { // // crop rect, use const value // int *rect_vec = new int[5]{crop_left, crop_top, crop_width, crop_height, src_width}; // cudaMemcpyToSymbol( rect, rect_vec, sizeof(int) * 5); // // // int dst_size = crop_width * crop_height; // int src_size = src_width * src_height; // // dst使用cuda malloc // unsigned char *dev_src, *dev_dst; // cudaMalloc( (void**)&dev_dst, dst_size * sizeof(unsigned char) ) ; // cudaMalloc( (void**)&dev_src, src_size * sizeof(unsigned char) ) ; // cudaMemcpy( dev_src, src, src_size * sizeof(unsigned char), cudaMemcpyHostToDevice ); // // // src使用紋理內存 // cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>(); // int err = cudaBindTexture2D( NULL, tex_src, dev_src, desc, src_width, src_height, // sizeof(unsigned char) * src_width ); // if (err != cudaSuccess) { // printf("bind failed!!! 
%d\n", err); // } // // // 設備函數 // dim3 blocks((dst_size + PIXEL_PER_THREAD - 1) / PIXEL_PER_THREAD); // dim3 threads(PIXEL_PER_THREAD); // kernel_crop_grey<<<blocks,threads>>>( dev_dst ); // // // 讀取dst內存 // cudaMemcpy(dst, dev_dst, dst_size * sizeof(unsigned char), cudaMemcpyDeviceToHost); // // // 回收內存 // cudaFree(dev_dst); // cudaFree(dev_src); // cudaUnbindTexture( tex_src ); // // delete[] rect_vec; //} // //}
3015a98c30d587ed91e7f0e8e9f0e0fedb6c2499.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gasal.h" #define CHECKCUDAERROR(error) \ do{\ err = error;\ if (hipSuccess != err ) { \ fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. %d in file %s\n", hipGetErrorString(err), err, __LINE__, __FILE__); \ exit(EXIT_FAILURE);\ }\ }while(0)\ inline int CudaCheckKernelLaunch() { hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { return -1; } return 0; } #include "gasal_kernels_inl.h" gasal_gpu_storage_v gasal_init_gpu_storage_v(int n_streams) { gasal_gpu_storage_v v; v.a = (gasal_gpu_storage_t*)calloc(n_streams, sizeof(gasal_gpu_storage_t)); v.n = n_streams; return v; } //GASAL2 blocking alignment function void gasal_aln(gasal_gpu_storage_t *gpu_storage, const uint8_t *query_batch, const uint32_t *query_batch_offsets, const uint32_t *query_batch_lens, const uint8_t *target_batch, const uint32_t *target_batch_offsets, const uint32_t *target_batch_lens, const uint32_t actual_query_batch_bytes, const uint32_t actual_target_batch_bytes, const uint32_t actual_n_alns, int32_t *host_aln_score, int32_t *host_query_batch_start, int32_t *host_target_batch_start, int32_t *host_query_batch_end, int32_t *host_target_batch_end, int algo, int start) { hipError_t err; if (actual_n_alns <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_n_alns <= 0\n"); exit(EXIT_FAILURE); } if (actual_query_batch_bytes <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes <= 0\n"); exit(EXIT_FAILURE); } if (actual_target_batch_bytes <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes <= 0\n"); exit(EXIT_FAILURE); } if (actual_query_batch_bytes % 8) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes=%d is not a multiple of 8\n", actual_query_batch_bytes); exit(EXIT_FAILURE); } if (actual_target_batch_bytes % 8) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes=%d is not a multiple of 8\n", actual_target_batch_bytes); 
exit(EXIT_FAILURE); } //--------------if pre-allocated memory is less, allocate more-------------------------- if (gpu_storage->gpu_max_query_batch_bytes < actual_query_batch_bytes) { int i = 2; while ( (gpu_storage->gpu_max_query_batch_bytes * i) < actual_query_batch_bytes) i++; gpu_storage->gpu_max_query_batch_bytes = gpu_storage->gpu_max_query_batch_bytes * i; fprintf(stderr, "[GASAL WARNING:] actual_query_batch_bytes(%d) > Allocated GPU memory (gpu_max_query_batch_bytes=%d). Therefore, allocating %d bytes on GPU (gpu_max_query_batch_bytes=%d). Performance may be lost if this is repeated many times.\n", actual_query_batch_bytes, gpu_storage->gpu_max_query_batch_bytes, gpu_storage->gpu_max_query_batch_bytes*i, gpu_storage->gpu_max_query_batch_bytes*i); if (gpu_storage->unpacked_query_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->unpacked_query_batch)); if (gpu_storage->packed_query_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->packed_query_batch)); CHECKCUDAERROR(hipMalloc(&(gpu_storage->unpacked_query_batch), gpu_storage->gpu_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->packed_query_batch), (gpu_storage->gpu_max_query_batch_bytes/8) * sizeof(uint32_t))); } if (gpu_storage->gpu_max_target_batch_bytes < actual_target_batch_bytes) { int i = 2; while ( (gpu_storage->gpu_max_target_batch_bytes * i) < actual_target_batch_bytes) i++; gpu_storage->gpu_max_target_batch_bytes = gpu_storage->gpu_max_target_batch_bytes * i; fprintf(stderr, "[GASAL WARNING:] actual_target_batch_bytes(%d) > Allocated GPU memory (gpu_max_target_batch_bytes=%d). Therefore, allocating %d bytes on GPU (gpu_max_target_batch_bytes=%d). 
Performance may be lost if this is repeated many times.\n", actual_target_batch_bytes, gpu_storage->gpu_max_target_batch_bytes, gpu_storage->gpu_max_target_batch_bytes*i, gpu_storage->gpu_max_target_batch_bytes*i); if (gpu_storage->unpacked_target_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->unpacked_target_batch)); if (gpu_storage->packed_target_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->packed_target_batch)); CHECKCUDAERROR(hipMalloc(&(gpu_storage->unpacked_target_batch), gpu_storage->gpu_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->packed_target_batch), (gpu_storage->gpu_max_target_batch_bytes/8) * sizeof(uint32_t))); } if (gpu_storage->gpu_max_n_alns < actual_n_alns) { fprintf(stderr, "[GASAL] gpu_max_n_alns(%d) should be >= acutal_n_alns(%d)\n", gpu_storage->gpu_max_n_alns, actual_n_alns); int i = 2; while ( (gpu_storage->gpu_max_n_alns * i) < actual_n_alns) i++; gpu_storage->gpu_max_n_alns = gpu_storage->gpu_max_n_alns * i; fprintf(stderr, "[GASAL WARNING:] actual_n_alns(%d) > gpu_max_n_alns(%d). Therefore, allocating memory for %d alignments on GPU (gpu_max_n_alns=%d). 
Performance may be lost if this is repeated many times.\n", actual_n_alns, gpu_storage->gpu_max_n_alns, gpu_storage->gpu_max_n_alns*i, gpu_storage->gpu_max_n_alns*i); if (gpu_storage->query_batch_offsets != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_offsets)); if (gpu_storage->target_batch_offsets != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_offsets)); if (gpu_storage->query_batch_lens != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_lens)); if (gpu_storage->target_batch_lens != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_lens)); if (gpu_storage->aln_score != NULL) CHECKCUDAERROR(hipFree(gpu_storage->aln_score)); if (gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_start)); if (gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_start)); if (gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_end)); if (gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_end)); CHECKCUDAERROR(hipMalloc(&(gpu_storage->query_batch_lens), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->target_batch_lens), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->query_batch_offsets), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->target_batch_offsets), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->aln_score),gpu_storage->gpu_max_n_alns * sizeof(int32_t))); if (algo == GLOBAL) { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; gpu_storage->target_batch_start = NULL; gpu_storage->target_batch_end = NULL; } else { CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); if (start == WITH_START) { CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_start), 
gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); } else gpu_storage->target_batch_start = NULL; if (algo == LOCAL) { CHECKCUDAERROR( hipMalloc(&(gpu_storage->query_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); if (start == WITH_START) { CHECKCUDAERROR( hipMalloc(&(gpu_storage->query_batch_start), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); } else gpu_storage->query_batch_start = NULL; } else { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; } } } //------------------------------------------------------------------------------------------- //------------------------copy sequence batches from CPU to GPU--------------------------- CHECKCUDAERROR(hipMemcpy(gpu_storage->unpacked_query_batch, query_batch, actual_query_batch_bytes, hipMemcpyHostToDevice)); CHECKCUDAERROR(hipMemcpy(gpu_storage->unpacked_target_batch, target_batch, actual_target_batch_bytes, hipMemcpyHostToDevice)); //---------------------------------------------------------------------------------------- uint32_t BLOCKDIM = 128; uint32_t N_BLOCKS = (actual_n_alns + BLOCKDIM - 1) / BLOCKDIM; int query_batch_tasks_per_thread = (int)ceil((double)actual_query_batch_bytes/(8*BLOCKDIM*N_BLOCKS)); int target_batch_tasks_per_thread = (int)ceil((double)actual_target_batch_bytes/(8*BLOCKDIM*N_BLOCKS)); //launch packing kernel hipLaunchKernelGGL(( gasal_pack_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, 0, (uint32_t*)(gpu_storage->unpacked_query_batch), (uint32_t*)(gpu_storage->unpacked_target_batch), gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, query_batch_tasks_per_thread, target_batch_tasks_per_thread, actual_query_batch_bytes/4, actual_target_batch_bytes/4); hipError_t pack_kernel_err = hipGetLastError(); if ( hipSuccess != pack_kernel_err ) { fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. 
%d in file %s\n", hipGetErrorString(pack_kernel_err), pack_kernel_err, __LINE__, __FILE__); exit(EXIT_FAILURE); } //----------------------copy sequence offsets and lengths from CPU to GPU-------------------------------------- CHECKCUDAERROR(hipMemcpy(gpu_storage->query_batch_lens, query_batch_lens, actual_n_alns * sizeof(uint32_t), hipMemcpyHostToDevice)); CHECKCUDAERROR(hipMemcpy(gpu_storage->target_batch_lens, target_batch_lens, actual_n_alns * sizeof(uint32_t), hipMemcpyHostToDevice)); CHECKCUDAERROR(hipMemcpy(gpu_storage->query_batch_offsets, query_batch_offsets, actual_n_alns * sizeof(uint32_t), hipMemcpyHostToDevice)); CHECKCUDAERROR(hipMemcpy(gpu_storage->target_batch_offsets, target_batch_offsets, actual_n_alns * sizeof(uint32_t), hipMemcpyHostToDevice)); //------------------------------------------------------------------------------------------------------------------------ //--------------------------------------launch alignment kernels-------------------------------------------------------------- if(algo == LOCAL) { if (start == WITH_START) { hipLaunchKernelGGL(( gasal_local_with_start_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, 0, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->query_batch_end, gpu_storage->target_batch_end, gpu_storage->query_batch_start, gpu_storage->target_batch_start, actual_n_alns); } else { hipLaunchKernelGGL(( gasal_local_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, 0, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->query_batch_end, gpu_storage->target_batch_end, actual_n_alns); } } else if (algo == SEMI_GLOBAL) { if (start == WITH_START) { hipLaunchKernelGGL(( 
gasal_semi_global_with_start_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, 0, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->target_batch_end, gpu_storage->target_batch_start, actual_n_alns); } else { hipLaunchKernelGGL(( gasal_semi_global_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, 0, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->target_batch_end, actual_n_alns); } } else if (algo == GLOBAL) { hipLaunchKernelGGL(( gasal_global_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, 0, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, actual_n_alns); } else { fprintf(stderr, "[GASAL ERROR:] Algo type invalid\n"); exit(EXIT_FAILURE); } //----------------------------------------------------------------------------------------------------------------------- hipError_t aln_kernel_err = hipGetLastError(); if ( hipSuccess != aln_kernel_err ) { fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. 
%d in file %s\n", hipGetErrorString(aln_kernel_err), aln_kernel_err, __LINE__, __FILE__); exit(EXIT_FAILURE); } //------------------------copy alignment results from GPU to CPU-------------------------------------- if (host_aln_score != NULL && gpu_storage->aln_score != NULL) CHECKCUDAERROR(hipMemcpy(host_aln_score, gpu_storage->aln_score, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost)); else { fprintf(stderr, "[GASAL ERROR:] The *host_aln_score input can't be NULL\n"); exit(EXIT_FAILURE); } if (host_query_batch_start != NULL && gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(hipMemcpy(host_query_batch_start, gpu_storage->query_batch_start, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost)); if (host_target_batch_start != NULL && gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(hipMemcpy(host_target_batch_start, gpu_storage->target_batch_start, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost)); if (host_query_batch_end != NULL && gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(hipMemcpy(host_query_batch_end, gpu_storage->query_batch_end, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost)); if (host_target_batch_end != NULL && gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(hipMemcpy(host_target_batch_end, gpu_storage->target_batch_end, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost)); //------------------------------------------------------------------------------------------------------ } //GASAL2 asynchronous (a.k.a non-blocking) alignment function void gasal_aln_async(gasal_gpu_storage_t *gpu_storage, const uint32_t actual_query_batch_bytes, const uint32_t actual_target_batch_bytes, const uint32_t actual_n_alns, int algo, int start) { hipError_t err; if (actual_n_alns <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_n_alns <= 0\n"); exit(EXIT_FAILURE); } if (actual_query_batch_bytes <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes <= 0\n"); exit(EXIT_FAILURE); } if 
(actual_target_batch_bytes <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes <= 0\n"); exit(EXIT_FAILURE); } if (actual_query_batch_bytes % 8) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes=%d is not a multiple of 8\n", actual_query_batch_bytes); exit(EXIT_FAILURE); } if (actual_target_batch_bytes % 8) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes=%d is not a multiple of 8\n", actual_target_batch_bytes); exit(EXIT_FAILURE); } if (actual_query_batch_bytes > gpu_storage->host_max_query_batch_bytes) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes(%d) > host_max_query_batch_bytes(%d)\n", actual_query_batch_bytes, gpu_storage->host_max_query_batch_bytes); exit(EXIT_FAILURE); } if (actual_target_batch_bytes > gpu_storage->host_max_target_batch_bytes) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes(%d) > host_max_target_batch_bytes(%d)\n", actual_target_batch_bytes, gpu_storage->host_max_target_batch_bytes); exit(EXIT_FAILURE); } if (actual_n_alns > gpu_storage->host_max_n_alns) { fprintf(stderr, "[GASAL ERROR:] actual_n_alns(%d) > host_max_n_alns(%d)\n", actual_n_alns, gpu_storage->host_max_n_alns); exit(EXIT_FAILURE); } //--------------if pre-allocated memory is less, allocate more-------------------------- if (gpu_storage->gpu_max_query_batch_bytes < actual_query_batch_bytes) { int i = 2; while ( (gpu_storage->gpu_max_query_batch_bytes * i) < actual_query_batch_bytes) i++; gpu_storage->gpu_max_query_batch_bytes = gpu_storage->gpu_max_query_batch_bytes * i; fprintf(stderr, "[GASAL WARNING:] actual_query_batch_bytes(%d) > Allocated GPU memory (gpu_max_query_batch_bytes=%d). Therefore, allocating %d bytes on GPU (gpu_max_query_batch_bytes=%d). 
Performance may be lost if this is repeated many times.\n", actual_query_batch_bytes, gpu_storage->gpu_max_query_batch_bytes, gpu_storage->gpu_max_query_batch_bytes*i, gpu_storage->gpu_max_query_batch_bytes*i); if (gpu_storage->unpacked_query_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->unpacked_query_batch)); if (gpu_storage->packed_query_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->packed_query_batch)); CHECKCUDAERROR(hipMalloc(&(gpu_storage->unpacked_query_batch), gpu_storage->gpu_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->packed_query_batch), (gpu_storage->gpu_max_query_batch_bytes/8) * sizeof(uint32_t))); } if (gpu_storage->gpu_max_target_batch_bytes < actual_target_batch_bytes) { int i = 2; while ( (gpu_storage->gpu_max_target_batch_bytes * i) < actual_target_batch_bytes) i++; gpu_storage->gpu_max_target_batch_bytes = gpu_storage->gpu_max_target_batch_bytes * i; fprintf(stderr, "[GASAL WARNING:] actual_target_batch_bytes(%d) > Allocated GPU memory (gpu_max_target_batch_bytes=%d). Therefore, allocating %d bytes on GPU (gpu_max_target_batch_bytes=%d). 
Performance may be lost if this is repeated many times.\n", actual_target_batch_bytes, gpu_storage->gpu_max_target_batch_bytes, gpu_storage->gpu_max_target_batch_bytes*i, gpu_storage->gpu_max_target_batch_bytes*i); if (gpu_storage->unpacked_target_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->unpacked_target_batch)); if (gpu_storage->packed_target_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->packed_target_batch)); CHECKCUDAERROR(hipMalloc(&(gpu_storage->unpacked_target_batch), gpu_storage->gpu_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->packed_target_batch), (gpu_storage->gpu_max_target_batch_bytes/8) * sizeof(uint32_t))); } if (gpu_storage->gpu_max_n_alns < actual_n_alns) { int i = 2; while ( (gpu_storage->gpu_max_n_alns * i) < actual_n_alns) i++; gpu_storage->gpu_max_n_alns = gpu_storage->gpu_max_n_alns * i; fprintf(stderr, "[GASAL WARNING:] actual_n_alns(%d) > gpu_max_n_alns(%d). Therefore, allocating memory for %d alignments on GPU (gpu_max_n_alns=%d). 
Performance may be lost if this is repeated many times.\n", actual_n_alns, gpu_storage->gpu_max_n_alns, gpu_storage->gpu_max_n_alns*i, gpu_storage->gpu_max_n_alns*i); if (gpu_storage->query_batch_offsets != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_offsets)); if (gpu_storage->target_batch_offsets != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_offsets)); if (gpu_storage->query_batch_lens != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_lens)); if (gpu_storage->target_batch_lens != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_lens)); if (gpu_storage->aln_score != NULL) CHECKCUDAERROR(hipFree(gpu_storage->aln_score)); if (gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_start)); if (gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_start)); if (gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_end)); if (gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_end)); CHECKCUDAERROR(hipMalloc(&(gpu_storage->query_batch_lens), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->target_batch_lens), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->query_batch_offsets), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->target_batch_offsets), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->aln_score), gpu_storage->gpu_max_n_alns * sizeof(int32_t))); if (algo == GLOBAL) { gpu_storage->query_batch_start = NULL; gpu_storage->target_batch_start = NULL; gpu_storage->query_batch_end = NULL; gpu_storage->target_batch_end = NULL; } else if (algo == SEMI_GLOBAL) { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; if (start == WITH_START) { CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_start), 
gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); } else { CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); gpu_storage->target_batch_start = NULL; } } else { if (start == WITH_START) { CHECKCUDAERROR( hipMalloc(&(gpu_storage->query_batch_start), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_start), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage->query_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); } else { CHECKCUDAERROR( hipMalloc(&(gpu_storage->query_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); gpu_storage->query_batch_start = NULL; gpu_storage->target_batch_start = NULL; } } } //------------------------------------------ //------------------------launch copying of sequence batches from CPU to GPU--------------------------- CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->unpacked_query_batch, gpu_storage->host_unpacked_query_batch, actual_query_batch_bytes, hipMemcpyHostToDevice, gpu_storage->str)); CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->unpacked_target_batch, gpu_storage->host_unpacked_target_batch, actual_target_batch_bytes, hipMemcpyHostToDevice, gpu_storage->str)); //----------------------------------------------------------------------------------------------------------- uint32_t BLOCKDIM = 128; uint32_t N_BLOCKS = (actual_n_alns + BLOCKDIM - 1) / BLOCKDIM; int query_batch_tasks_per_thread = (int)ceil((double)actual_query_batch_bytes/(8*BLOCKDIM*N_BLOCKS)); int target_batch_tasks_per_thread = 
(int)ceil((double)actual_target_batch_bytes/(8*BLOCKDIM*N_BLOCKS)); //-------------------------------------------launch packing kernel hipLaunchKernelGGL(( gasal_pack_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, gpu_storage->str, (uint32_t*)(gpu_storage->unpacked_query_batch), (uint32_t*)(gpu_storage->unpacked_target_batch), gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, query_batch_tasks_per_thread, target_batch_tasks_per_thread, actual_query_batch_bytes/4, actual_target_batch_bytes/4); hipError_t pack_kernel_err = hipGetLastError(); if ( hipSuccess != pack_kernel_err ) { fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. %d in file %s\n", hipGetErrorString(pack_kernel_err), pack_kernel_err, __LINE__, __FILE__); exit(EXIT_FAILURE); } //----------------------launch copying of sequence offsets and lengths from CPU to GPU-------------------------------------- CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->query_batch_lens, gpu_storage->host_query_batch_lens, actual_n_alns * sizeof(uint32_t), hipMemcpyHostToDevice, gpu_storage->str)); CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->target_batch_lens, gpu_storage->host_target_batch_lens, actual_n_alns * sizeof(uint32_t), hipMemcpyHostToDevice, gpu_storage->str)); CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->query_batch_offsets, gpu_storage->host_query_batch_offsets, actual_n_alns * sizeof(uint32_t), hipMemcpyHostToDevice, gpu_storage->str)); CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->target_batch_offsets, gpu_storage->host_target_batch_offsets, actual_n_alns * sizeof(uint32_t), hipMemcpyHostToDevice, gpu_storage->str)); //--------------------------------------------------------------------------------------------------------------- //--------------------------------------launch alignment kernels-------------------------------------------------------------- if(algo == LOCAL) { if (start == WITH_START) { hipLaunchKernelGGL(( gasal_local_with_start_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, 
gpu_storage->str, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->query_batch_end, gpu_storage->target_batch_end, gpu_storage->query_batch_start, gpu_storage->target_batch_start, actual_n_alns); } else { hipLaunchKernelGGL(( gasal_local_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, gpu_storage->str, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->query_batch_end, gpu_storage->target_batch_end, actual_n_alns); } } else if (algo == SEMI_GLOBAL) { if (start == WITH_START) { hipLaunchKernelGGL(( gasal_semi_global_with_start_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, gpu_storage->str, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->target_batch_end, gpu_storage->target_batch_start, actual_n_alns); } else { hipLaunchKernelGGL(( gasal_semi_global_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, gpu_storage->str, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->target_batch_end, actual_n_alns); } } else if (algo == GLOBAL) { hipLaunchKernelGGL(( gasal_global_kernel), dim3(N_BLOCKS), dim3(BLOCKDIM), 0, gpu_storage->str, gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, 
actual_n_alns); } else { fprintf(stderr, "[GASAL ERROR:] Algo type invalid\n"); exit(EXIT_FAILURE); } //----------------------------------------------------------------------------------------------------------------------- hipError_t aln_kernel_err = hipGetLastError(); if ( hipSuccess != aln_kernel_err ) { fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. %d in file %s\n", hipGetErrorString(aln_kernel_err), aln_kernel_err, __LINE__, __FILE__); exit(EXIT_FAILURE); } //------------------------launch the copying of alignment results from GPU to CPU-------------------------------------- if (gpu_storage->host_aln_score != NULL && gpu_storage->aln_score != NULL) CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->host_aln_score, gpu_storage->aln_score, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost, gpu_storage->str)); if (gpu_storage->host_query_batch_start != NULL && gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->host_query_batch_start, gpu_storage->query_batch_start, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost, gpu_storage->str)); if (gpu_storage->host_target_batch_start != NULL && gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->host_target_batch_start, gpu_storage->target_batch_start, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost, gpu_storage->str)); if (gpu_storage->host_query_batch_end != NULL && gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->host_query_batch_end, gpu_storage->query_batch_end, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost, gpu_storage->str)); if (gpu_storage->host_target_batch_end != NULL && gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(hipMemcpyAsync(gpu_storage->host_target_batch_end, gpu_storage->target_batch_end, actual_n_alns * sizeof(int32_t), hipMemcpyDeviceToHost, gpu_storage->str)); 
//----------------------------------------------------------------------------------------------------------------------- gpu_storage->is_free = 0; //set the availability of current stream to false } int gasal_is_aln_async_done(gasal_gpu_storage_t *gpu_storage) { hipError_t err; if(gpu_storage->is_free == 1) return -2;//if no work is launced in this stream, return -2 err = hipStreamQuery(gpu_storage->str);//check to see if the stream is finished if (err != hipSuccess ) { if (err == hipErrorNotReady) return -1; else{ fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. %d in file %s\n", hipGetErrorString(err), err, __LINE__, __FILE__); exit(EXIT_FAILURE); } } gpu_storage->is_free = 1; return 0; } void gasal_gpu_mem_alloc(gasal_gpu_storage_t *gpu_storage, int gpu_max_query_batch_bytes, int gpu_max_target_batch_bytes, int gpu_max_n_alns, int algo, int start) { hipError_t err; // if (gpu_storage->gpu_max_query_batch_bytes % 8) { // fprintf(stderr, "[GASAL ERROR:] max_query_batch_bytes=%d is not a multiple of 8\n", gpu_storage->gpu_max_query_batch_bytes % 8); // exit(EXIT_FAILURE); // } // if (gpu_storage->gpu_max_target_batch_bytes % 8) { // fprintf(stderr, "[GASAL ERROR:] max_target_batch_bytes=%d is not a multiple of 8\n", gpu_storage->gpu_max_target_batch_bytes % 8); // exit(EXIT_FAILURE); // } CHECKCUDAERROR(hipMalloc(&(gpu_storage->unpacked_query_batch), gpu_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->unpacked_target_batch), gpu_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->packed_query_batch), (gpu_max_query_batch_bytes/8) * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->packed_target_batch), (gpu_max_target_batch_bytes/8) * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->query_batch_lens), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->target_batch_lens), gpu_max_n_alns * sizeof(uint32_t))); 
CHECKCUDAERROR(hipMalloc(&(gpu_storage->query_batch_offsets), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->target_batch_offsets), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage->aln_score), gpu_max_n_alns * sizeof(int32_t))); if (algo == GLOBAL) { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; gpu_storage->target_batch_start = NULL; gpu_storage->target_batch_end = NULL; } else { CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); if (start == WITH_START) { CHECKCUDAERROR( hipMalloc(&(gpu_storage->target_batch_start), gpu_max_n_alns * sizeof(uint32_t))); } else gpu_storage->target_batch_start = NULL; if (algo == LOCAL) { CHECKCUDAERROR( hipMalloc(&(gpu_storage->query_batch_end), gpu_max_n_alns * sizeof(uint32_t))); if (start == WITH_START) { CHECKCUDAERROR( hipMalloc(&(gpu_storage->query_batch_start), gpu_max_n_alns * sizeof(uint32_t))); } else gpu_storage->query_batch_start = NULL; } else { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; } } gpu_storage->gpu_max_query_batch_bytes = gpu_max_query_batch_bytes; gpu_storage->gpu_max_target_batch_bytes = gpu_max_target_batch_bytes; gpu_storage->gpu_max_n_alns = gpu_max_n_alns; } void gasal_init_streams(gasal_gpu_storage_v *gpu_storage_vec, int host_max_query_batch_bytes, int gpu_max_query_batch_bytes, int host_max_target_batch_bytes, int gpu_max_target_batch_bytes, int host_max_n_alns, int gpu_max_n_alns, int algo, int start) { hipError_t err; int i; for (i = 0; i < gpu_storage_vec->n; i++) { CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_unpacked_query_batch), host_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_unpacked_target_batch), host_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].unpacked_query_batch), gpu_max_query_batch_bytes * 
sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].unpacked_target_batch), gpu_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].packed_query_batch), (gpu_max_query_batch_bytes/8) * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].packed_target_batch), (gpu_max_target_batch_bytes/8) * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_query_batch_lens), host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_target_batch_lens), host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_query_batch_offsets), host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_target_batch_offsets), host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_aln_score), host_max_n_alns * sizeof(int32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].query_batch_lens), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].target_batch_lens), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].query_batch_offsets), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].target_batch_offsets), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipMalloc(&(gpu_storage_vec->a[i].aln_score), gpu_max_n_alns * sizeof(int32_t))); if (algo == GLOBAL) { gpu_storage_vec->a[i].host_query_batch_start = NULL; gpu_storage_vec->a[i].host_target_batch_start = NULL; gpu_storage_vec->a[i].host_query_batch_end = NULL; gpu_storage_vec->a[i].host_target_batch_end = NULL; gpu_storage_vec->a[i].query_batch_start = NULL; gpu_storage_vec->a[i].target_batch_start = NULL; gpu_storage_vec->a[i].query_batch_end = NULL; gpu_storage_vec->a[i].target_batch_end = NULL; } else if (algo == SEMI_GLOBAL) { gpu_storage_vec->a[i].host_query_batch_start = NULL; 
gpu_storage_vec->a[i].host_query_batch_end = NULL; gpu_storage_vec->a[i].query_batch_start = NULL; gpu_storage_vec->a[i].query_batch_end = NULL; if (start == WITH_START) { CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_target_batch_start),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_target_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].target_batch_start), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); } else { CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_target_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); gpu_storage_vec->a[i].host_target_batch_start = NULL; gpu_storage_vec->a[i].target_batch_start = NULL; } } else { if (start == WITH_START) { CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_query_batch_start),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_target_batch_start),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_query_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_target_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].query_batch_start), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].target_batch_start), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].query_batch_end), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); } else { CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_query_batch_end),host_max_n_alns * sizeof(uint32_t))); 
CHECKCUDAERROR(hipHostMalloc(&(gpu_storage_vec->a[i].host_target_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].query_batch_end), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( hipMalloc(&(gpu_storage_vec->a[i].target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); gpu_storage_vec->a[i].host_query_batch_start = NULL; gpu_storage_vec->a[i].host_target_batch_start = NULL; gpu_storage_vec->a[i].query_batch_start = NULL; gpu_storage_vec->a[i].target_batch_start = NULL; } } CHECKCUDAERROR(hipStreamCreate(&(gpu_storage_vec->a[i].str))); gpu_storage_vec->a[i].is_free = 1; gpu_storage_vec->a[i].host_max_query_batch_bytes = host_max_query_batch_bytes; gpu_storage_vec->a[i].host_max_target_batch_bytes = host_max_target_batch_bytes; gpu_storage_vec->a[i].host_max_n_alns = host_max_n_alns; gpu_storage_vec->a[i].gpu_max_query_batch_bytes = gpu_max_query_batch_bytes; gpu_storage_vec->a[i].gpu_max_target_batch_bytes = gpu_max_target_batch_bytes; gpu_storage_vec->a[i].gpu_max_n_alns = gpu_max_n_alns; } } void gasal_gpu_mem_free(gasal_gpu_storage_t *gpu_storage) { hipError_t err; if (gpu_storage->unpacked_query_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->unpacked_query_batch)); if (gpu_storage->unpacked_target_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->unpacked_target_batch)); if (gpu_storage->packed_query_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->packed_query_batch)); if (gpu_storage->packed_target_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage->packed_target_batch)); if (gpu_storage->query_batch_offsets != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_offsets)); if (gpu_storage->target_batch_offsets != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_offsets)); if (gpu_storage->query_batch_lens != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_lens)); if (gpu_storage->target_batch_lens != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_lens)); if 
(gpu_storage->aln_score != NULL) CHECKCUDAERROR(hipFree(gpu_storage->aln_score)); if (gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_start)); if (gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_start)); if (gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(hipFree(gpu_storage->query_batch_end)); if (gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(hipFree(gpu_storage->target_batch_end)); } void gasal_destroy_streams(gasal_gpu_storage_v *gpu_storage_vec) { hipError_t err; int i; for (i = 0; i < gpu_storage_vec->n; i ++) { if (gpu_storage_vec->a[i].host_unpacked_query_batch != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_unpacked_query_batch)); if (gpu_storage_vec->a[i].host_unpacked_target_batch != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_unpacked_target_batch)); if (gpu_storage_vec->a[i].host_query_batch_offsets != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_query_batch_offsets)); if (gpu_storage_vec->a[i].host_target_batch_offsets != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_target_batch_offsets)); if (gpu_storage_vec->a[i].host_query_batch_lens != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_query_batch_lens)); if (gpu_storage_vec->a[i].host_target_batch_lens != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_target_batch_lens)); if (gpu_storage_vec->a[i].host_aln_score != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_aln_score)); if (gpu_storage_vec->a[i].host_query_batch_start != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_query_batch_start)); if (gpu_storage_vec->a[i].host_target_batch_start != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_target_batch_start)); if (gpu_storage_vec->a[i].host_query_batch_end != NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_query_batch_end)); if (gpu_storage_vec->a[i].host_target_batch_end 
!= NULL) CHECKCUDAERROR(hipHostFree(gpu_storage_vec->a[i].host_target_batch_end)); if (gpu_storage_vec->a[i].unpacked_query_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].unpacked_query_batch)); if (gpu_storage_vec->a[i].unpacked_target_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].unpacked_target_batch)); if (gpu_storage_vec->a[i].packed_query_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].packed_query_batch)); if (gpu_storage_vec->a[i].packed_target_batch != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].packed_target_batch)); if (gpu_storage_vec->a[i].query_batch_offsets != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].query_batch_offsets)); if (gpu_storage_vec->a[i].target_batch_offsets != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].target_batch_offsets)); if (gpu_storage_vec->a[i].query_batch_lens != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].query_batch_lens)); if (gpu_storage_vec->a[i].target_batch_lens != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].target_batch_lens)); if (gpu_storage_vec->a[i].aln_score != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].aln_score)); if (gpu_storage_vec->a[i].query_batch_start != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].query_batch_start)); if (gpu_storage_vec->a[i].target_batch_start != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].target_batch_start)); if (gpu_storage_vec->a[i].query_batch_end != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].query_batch_end)); if (gpu_storage_vec->a[i].target_batch_end != NULL) CHECKCUDAERROR(hipFree(gpu_storage_vec->a[i].target_batch_end)); if (gpu_storage_vec->a[i].str != NULL)CHECKCUDAERROR(hipStreamDestroy(gpu_storage_vec->a[i].str)); } } void gasal_destroy_gpu_storage_v(gasal_gpu_storage_v *gpu_storage_vec) { if(gpu_storage_vec->a != NULL) free(gpu_storage_vec->a); } void gasal_copy_subst_scores(gasal_subst_scores *subst){ hipError_t err; CHECKCUDAERROR(hipMemcpyToSymbol(_cudaGapO, 
&(subst->gap_open), sizeof(int32_t), 0, hipMemcpyHostToDevice)); CHECKCUDAERROR(hipMemcpyToSymbol(_cudaGapExtend, &(subst->gap_extend), sizeof(int32_t), 0, hipMemcpyHostToDevice)); int32_t gapoe = (subst->gap_open + subst->gap_extend); CHECKCUDAERROR(hipMemcpyToSymbol(_cudaGapOE, &(gapoe), sizeof(int32_t), 0, hipMemcpyHostToDevice)); CHECKCUDAERROR(hipMemcpyToSymbol(_cudaMatchScore, &(subst->match), sizeof(int32_t), 0, hipMemcpyHostToDevice)); CHECKCUDAERROR(hipMemcpyToSymbol(_cudaMismatchScore, &(subst->mismatch), sizeof(int32_t), 0, hipMemcpyHostToDevice)); return; }
3015a98c30d587ed91e7f0e8e9f0e0fedb6c2499.cu
#include "gasal.h" #define CHECKCUDAERROR(error) \ do{\ err = error;\ if (cudaSuccess != err ) { \ fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. %d in file %s\n", cudaGetErrorString(err), err, __LINE__, __FILE__); \ exit(EXIT_FAILURE);\ }\ }while(0)\ inline int CudaCheckKernelLaunch() { cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { return -1; } return 0; } #include "gasal_kernels_inl.h" gasal_gpu_storage_v gasal_init_gpu_storage_v(int n_streams) { gasal_gpu_storage_v v; v.a = (gasal_gpu_storage_t*)calloc(n_streams, sizeof(gasal_gpu_storage_t)); v.n = n_streams; return v; } //GASAL2 blocking alignment function void gasal_aln(gasal_gpu_storage_t *gpu_storage, const uint8_t *query_batch, const uint32_t *query_batch_offsets, const uint32_t *query_batch_lens, const uint8_t *target_batch, const uint32_t *target_batch_offsets, const uint32_t *target_batch_lens, const uint32_t actual_query_batch_bytes, const uint32_t actual_target_batch_bytes, const uint32_t actual_n_alns, int32_t *host_aln_score, int32_t *host_query_batch_start, int32_t *host_target_batch_start, int32_t *host_query_batch_end, int32_t *host_target_batch_end, int algo, int start) { cudaError_t err; if (actual_n_alns <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_n_alns <= 0\n"); exit(EXIT_FAILURE); } if (actual_query_batch_bytes <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes <= 0\n"); exit(EXIT_FAILURE); } if (actual_target_batch_bytes <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes <= 0\n"); exit(EXIT_FAILURE); } if (actual_query_batch_bytes % 8) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes=%d is not a multiple of 8\n", actual_query_batch_bytes); exit(EXIT_FAILURE); } if (actual_target_batch_bytes % 8) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes=%d is not a multiple of 8\n", actual_target_batch_bytes); exit(EXIT_FAILURE); } //--------------if pre-allocated memory is less, allocate 
more-------------------------- if (gpu_storage->gpu_max_query_batch_bytes < actual_query_batch_bytes) { int i = 2; while ( (gpu_storage->gpu_max_query_batch_bytes * i) < actual_query_batch_bytes) i++; gpu_storage->gpu_max_query_batch_bytes = gpu_storage->gpu_max_query_batch_bytes * i; fprintf(stderr, "[GASAL WARNING:] actual_query_batch_bytes(%d) > Allocated GPU memory (gpu_max_query_batch_bytes=%d). Therefore, allocating %d bytes on GPU (gpu_max_query_batch_bytes=%d). Performance may be lost if this is repeated many times.\n", actual_query_batch_bytes, gpu_storage->gpu_max_query_batch_bytes, gpu_storage->gpu_max_query_batch_bytes*i, gpu_storage->gpu_max_query_batch_bytes*i); if (gpu_storage->unpacked_query_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->unpacked_query_batch)); if (gpu_storage->packed_query_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->packed_query_batch)); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->unpacked_query_batch), gpu_storage->gpu_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->packed_query_batch), (gpu_storage->gpu_max_query_batch_bytes/8) * sizeof(uint32_t))); } if (gpu_storage->gpu_max_target_batch_bytes < actual_target_batch_bytes) { int i = 2; while ( (gpu_storage->gpu_max_target_batch_bytes * i) < actual_target_batch_bytes) i++; gpu_storage->gpu_max_target_batch_bytes = gpu_storage->gpu_max_target_batch_bytes * i; fprintf(stderr, "[GASAL WARNING:] actual_target_batch_bytes(%d) > Allocated GPU memory (gpu_max_target_batch_bytes=%d). Therefore, allocating %d bytes on GPU (gpu_max_target_batch_bytes=%d). 
Performance may be lost if this is repeated many times.\n", actual_target_batch_bytes, gpu_storage->gpu_max_target_batch_bytes, gpu_storage->gpu_max_target_batch_bytes*i, gpu_storage->gpu_max_target_batch_bytes*i); if (gpu_storage->unpacked_target_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->unpacked_target_batch)); if (gpu_storage->packed_target_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->packed_target_batch)); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->unpacked_target_batch), gpu_storage->gpu_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->packed_target_batch), (gpu_storage->gpu_max_target_batch_bytes/8) * sizeof(uint32_t))); } if (gpu_storage->gpu_max_n_alns < actual_n_alns) { fprintf(stderr, "[GASAL] gpu_max_n_alns(%d) should be >= acutal_n_alns(%d)\n", gpu_storage->gpu_max_n_alns, actual_n_alns); int i = 2; while ( (gpu_storage->gpu_max_n_alns * i) < actual_n_alns) i++; gpu_storage->gpu_max_n_alns = gpu_storage->gpu_max_n_alns * i; fprintf(stderr, "[GASAL WARNING:] actual_n_alns(%d) > gpu_max_n_alns(%d). Therefore, allocating memory for %d alignments on GPU (gpu_max_n_alns=%d). 
Performance may be lost if this is repeated many times.\n", actual_n_alns, gpu_storage->gpu_max_n_alns, gpu_storage->gpu_max_n_alns*i, gpu_storage->gpu_max_n_alns*i); if (gpu_storage->query_batch_offsets != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_offsets)); if (gpu_storage->target_batch_offsets != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_offsets)); if (gpu_storage->query_batch_lens != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_lens)); if (gpu_storage->target_batch_lens != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_lens)); if (gpu_storage->aln_score != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->aln_score)); if (gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_start)); if (gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_start)); if (gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_end)); if (gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_end)); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->query_batch_lens), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->target_batch_lens), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->query_batch_offsets), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->target_batch_offsets), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->aln_score),gpu_storage->gpu_max_n_alns * sizeof(int32_t))); if (algo == GLOBAL) { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; gpu_storage->target_batch_start = NULL; gpu_storage->target_batch_end = NULL; } else { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); if (start == WITH_START) { CHECKCUDAERROR( 
cudaMalloc(&(gpu_storage->target_batch_start), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); } else gpu_storage->target_batch_start = NULL; if (algo == LOCAL) { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->query_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); if (start == WITH_START) { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->query_batch_start), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); } else gpu_storage->query_batch_start = NULL; } else { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; } } } //------------------------------------------------------------------------------------------- //------------------------copy sequence batches from CPU to GPU--------------------------- CHECKCUDAERROR(cudaMemcpy(gpu_storage->unpacked_query_batch, query_batch, actual_query_batch_bytes, cudaMemcpyHostToDevice)); CHECKCUDAERROR(cudaMemcpy(gpu_storage->unpacked_target_batch, target_batch, actual_target_batch_bytes, cudaMemcpyHostToDevice)); //---------------------------------------------------------------------------------------- uint32_t BLOCKDIM = 128; uint32_t N_BLOCKS = (actual_n_alns + BLOCKDIM - 1) / BLOCKDIM; int query_batch_tasks_per_thread = (int)ceil((double)actual_query_batch_bytes/(8*BLOCKDIM*N_BLOCKS)); int target_batch_tasks_per_thread = (int)ceil((double)actual_target_batch_bytes/(8*BLOCKDIM*N_BLOCKS)); //launch packing kernel gasal_pack_kernel<<<N_BLOCKS, BLOCKDIM>>>((uint32_t*)(gpu_storage->unpacked_query_batch), (uint32_t*)(gpu_storage->unpacked_target_batch), gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, query_batch_tasks_per_thread, target_batch_tasks_per_thread, actual_query_batch_bytes/4, actual_target_batch_bytes/4); cudaError_t pack_kernel_err = cudaGetLastError(); if ( cudaSuccess != pack_kernel_err ) { fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. 
%d in file %s\n", cudaGetErrorString(pack_kernel_err), pack_kernel_err, __LINE__, __FILE__); exit(EXIT_FAILURE); } //----------------------copy sequence offsets and lengths from CPU to GPU-------------------------------------- CHECKCUDAERROR(cudaMemcpy(gpu_storage->query_batch_lens, query_batch_lens, actual_n_alns * sizeof(uint32_t), cudaMemcpyHostToDevice)); CHECKCUDAERROR(cudaMemcpy(gpu_storage->target_batch_lens, target_batch_lens, actual_n_alns * sizeof(uint32_t), cudaMemcpyHostToDevice)); CHECKCUDAERROR(cudaMemcpy(gpu_storage->query_batch_offsets, query_batch_offsets, actual_n_alns * sizeof(uint32_t), cudaMemcpyHostToDevice)); CHECKCUDAERROR(cudaMemcpy(gpu_storage->target_batch_offsets, target_batch_offsets, actual_n_alns * sizeof(uint32_t), cudaMemcpyHostToDevice)); //------------------------------------------------------------------------------------------------------------------------ //--------------------------------------launch alignment kernels-------------------------------------------------------------- if(algo == LOCAL) { if (start == WITH_START) { gasal_local_with_start_kernel<<<N_BLOCKS, BLOCKDIM>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->query_batch_end, gpu_storage->target_batch_end, gpu_storage->query_batch_start, gpu_storage->target_batch_start, actual_n_alns); } else { gasal_local_kernel<<<N_BLOCKS, BLOCKDIM>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->query_batch_end, gpu_storage->target_batch_end, actual_n_alns); } } else if (algo == SEMI_GLOBAL) { if (start == WITH_START) { gasal_semi_global_with_start_kernel<<<N_BLOCKS, 
BLOCKDIM>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->target_batch_end, gpu_storage->target_batch_start, actual_n_alns); } else { gasal_semi_global_kernel<<<N_BLOCKS, BLOCKDIM>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->target_batch_end, actual_n_alns); } } else if (algo == GLOBAL) { gasal_global_kernel<<<N_BLOCKS, BLOCKDIM>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, actual_n_alns); } else { fprintf(stderr, "[GASAL ERROR:] Algo type invalid\n"); exit(EXIT_FAILURE); } //----------------------------------------------------------------------------------------------------------------------- cudaError_t aln_kernel_err = cudaGetLastError(); if ( cudaSuccess != aln_kernel_err ) { fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. 
%d in file %s\n", cudaGetErrorString(aln_kernel_err), aln_kernel_err, __LINE__, __FILE__); exit(EXIT_FAILURE); } //------------------------copy alignment results from GPU to CPU-------------------------------------- if (host_aln_score != NULL && gpu_storage->aln_score != NULL) CHECKCUDAERROR(cudaMemcpy(host_aln_score, gpu_storage->aln_score, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost)); else { fprintf(stderr, "[GASAL ERROR:] The *host_aln_score input can't be NULL\n"); exit(EXIT_FAILURE); } if (host_query_batch_start != NULL && gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(cudaMemcpy(host_query_batch_start, gpu_storage->query_batch_start, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost)); if (host_target_batch_start != NULL && gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(cudaMemcpy(host_target_batch_start, gpu_storage->target_batch_start, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost)); if (host_query_batch_end != NULL && gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(cudaMemcpy(host_query_batch_end, gpu_storage->query_batch_end, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost)); if (host_target_batch_end != NULL && gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(cudaMemcpy(host_target_batch_end, gpu_storage->target_batch_end, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost)); //------------------------------------------------------------------------------------------------------ } //GASAL2 asynchronous (a.k.a non-blocking) alignment function void gasal_aln_async(gasal_gpu_storage_t *gpu_storage, const uint32_t actual_query_batch_bytes, const uint32_t actual_target_batch_bytes, const uint32_t actual_n_alns, int algo, int start) { cudaError_t err; if (actual_n_alns <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_n_alns <= 0\n"); exit(EXIT_FAILURE); } if (actual_query_batch_bytes <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes <= 0\n"); exit(EXIT_FAILURE); } if 
(actual_target_batch_bytes <= 0) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes <= 0\n"); exit(EXIT_FAILURE); } if (actual_query_batch_bytes % 8) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes=%d is not a multiple of 8\n", actual_query_batch_bytes); exit(EXIT_FAILURE); } if (actual_target_batch_bytes % 8) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes=%d is not a multiple of 8\n", actual_target_batch_bytes); exit(EXIT_FAILURE); } if (actual_query_batch_bytes > gpu_storage->host_max_query_batch_bytes) { fprintf(stderr, "[GASAL ERROR:] actual_query_batch_bytes(%d) > host_max_query_batch_bytes(%d)\n", actual_query_batch_bytes, gpu_storage->host_max_query_batch_bytes); exit(EXIT_FAILURE); } if (actual_target_batch_bytes > gpu_storage->host_max_target_batch_bytes) { fprintf(stderr, "[GASAL ERROR:] actual_target_batch_bytes(%d) > host_max_target_batch_bytes(%d)\n", actual_target_batch_bytes, gpu_storage->host_max_target_batch_bytes); exit(EXIT_FAILURE); } if (actual_n_alns > gpu_storage->host_max_n_alns) { fprintf(stderr, "[GASAL ERROR:] actual_n_alns(%d) > host_max_n_alns(%d)\n", actual_n_alns, gpu_storage->host_max_n_alns); exit(EXIT_FAILURE); } //--------------if pre-allocated memory is less, allocate more-------------------------- if (gpu_storage->gpu_max_query_batch_bytes < actual_query_batch_bytes) { int i = 2; while ( (gpu_storage->gpu_max_query_batch_bytes * i) < actual_query_batch_bytes) i++; gpu_storage->gpu_max_query_batch_bytes = gpu_storage->gpu_max_query_batch_bytes * i; fprintf(stderr, "[GASAL WARNING:] actual_query_batch_bytes(%d) > Allocated GPU memory (gpu_max_query_batch_bytes=%d). Therefore, allocating %d bytes on GPU (gpu_max_query_batch_bytes=%d). 
Performance may be lost if this is repeated many times.\n", actual_query_batch_bytes, gpu_storage->gpu_max_query_batch_bytes, gpu_storage->gpu_max_query_batch_bytes*i, gpu_storage->gpu_max_query_batch_bytes*i); if (gpu_storage->unpacked_query_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->unpacked_query_batch)); if (gpu_storage->packed_query_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->packed_query_batch)); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->unpacked_query_batch), gpu_storage->gpu_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->packed_query_batch), (gpu_storage->gpu_max_query_batch_bytes/8) * sizeof(uint32_t))); } if (gpu_storage->gpu_max_target_batch_bytes < actual_target_batch_bytes) { int i = 2; while ( (gpu_storage->gpu_max_target_batch_bytes * i) < actual_target_batch_bytes) i++; gpu_storage->gpu_max_target_batch_bytes = gpu_storage->gpu_max_target_batch_bytes * i; fprintf(stderr, "[GASAL WARNING:] actual_target_batch_bytes(%d) > Allocated GPU memory (gpu_max_target_batch_bytes=%d). Therefore, allocating %d bytes on GPU (gpu_max_target_batch_bytes=%d). 
Performance may be lost if this is repeated many times.\n", actual_target_batch_bytes, gpu_storage->gpu_max_target_batch_bytes, gpu_storage->gpu_max_target_batch_bytes*i, gpu_storage->gpu_max_target_batch_bytes*i); if (gpu_storage->unpacked_target_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->unpacked_target_batch)); if (gpu_storage->packed_target_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->packed_target_batch)); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->unpacked_target_batch), gpu_storage->gpu_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->packed_target_batch), (gpu_storage->gpu_max_target_batch_bytes/8) * sizeof(uint32_t))); } if (gpu_storage->gpu_max_n_alns < actual_n_alns) { int i = 2; while ( (gpu_storage->gpu_max_n_alns * i) < actual_n_alns) i++; gpu_storage->gpu_max_n_alns = gpu_storage->gpu_max_n_alns * i; fprintf(stderr, "[GASAL WARNING:] actual_n_alns(%d) > gpu_max_n_alns(%d). Therefore, allocating memory for %d alignments on GPU (gpu_max_n_alns=%d). 
Performance may be lost if this is repeated many times.\n", actual_n_alns, gpu_storage->gpu_max_n_alns, gpu_storage->gpu_max_n_alns*i, gpu_storage->gpu_max_n_alns*i); if (gpu_storage->query_batch_offsets != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_offsets)); if (gpu_storage->target_batch_offsets != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_offsets)); if (gpu_storage->query_batch_lens != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_lens)); if (gpu_storage->target_batch_lens != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_lens)); if (gpu_storage->aln_score != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->aln_score)); if (gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_start)); if (gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_start)); if (gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_end)); if (gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_end)); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->query_batch_lens), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->target_batch_lens), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->query_batch_offsets), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->target_batch_offsets), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->aln_score), gpu_storage->gpu_max_n_alns * sizeof(int32_t))); if (algo == GLOBAL) { gpu_storage->query_batch_start = NULL; gpu_storage->target_batch_start = NULL; gpu_storage->query_batch_end = NULL; gpu_storage->target_batch_end = NULL; } else if (algo == SEMI_GLOBAL) { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; if (start == WITH_START) { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_start), 
gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); } else { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); gpu_storage->target_batch_start = NULL; } } else { if (start == WITH_START) { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->query_batch_start), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_start), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage->query_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); } else { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->query_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_end), gpu_storage->gpu_max_n_alns * sizeof(uint32_t))); gpu_storage->query_batch_start = NULL; gpu_storage->target_batch_start = NULL; } } } //------------------------------------------ //------------------------launch copying of sequence batches from CPU to GPU--------------------------- CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->unpacked_query_batch, gpu_storage->host_unpacked_query_batch, actual_query_batch_bytes, cudaMemcpyHostToDevice, gpu_storage->str)); CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->unpacked_target_batch, gpu_storage->host_unpacked_target_batch, actual_target_batch_bytes, cudaMemcpyHostToDevice, gpu_storage->str)); //----------------------------------------------------------------------------------------------------------- uint32_t BLOCKDIM = 128; uint32_t N_BLOCKS = (actual_n_alns + BLOCKDIM - 1) / BLOCKDIM; int query_batch_tasks_per_thread = (int)ceil((double)actual_query_batch_bytes/(8*BLOCKDIM*N_BLOCKS)); int target_batch_tasks_per_thread = 
(int)ceil((double)actual_target_batch_bytes/(8*BLOCKDIM*N_BLOCKS)); //-------------------------------------------launch packing kernel gasal_pack_kernel<<<N_BLOCKS, BLOCKDIM, 0, gpu_storage->str>>>((uint32_t*)(gpu_storage->unpacked_query_batch), (uint32_t*)(gpu_storage->unpacked_target_batch), gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, query_batch_tasks_per_thread, target_batch_tasks_per_thread, actual_query_batch_bytes/4, actual_target_batch_bytes/4); cudaError_t pack_kernel_err = cudaGetLastError(); if ( cudaSuccess != pack_kernel_err ) { fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. %d in file %s\n", cudaGetErrorString(pack_kernel_err), pack_kernel_err, __LINE__, __FILE__); exit(EXIT_FAILURE); } //----------------------launch copying of sequence offsets and lengths from CPU to GPU-------------------------------------- CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->query_batch_lens, gpu_storage->host_query_batch_lens, actual_n_alns * sizeof(uint32_t), cudaMemcpyHostToDevice, gpu_storage->str)); CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->target_batch_lens, gpu_storage->host_target_batch_lens, actual_n_alns * sizeof(uint32_t), cudaMemcpyHostToDevice, gpu_storage->str)); CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->query_batch_offsets, gpu_storage->host_query_batch_offsets, actual_n_alns * sizeof(uint32_t), cudaMemcpyHostToDevice, gpu_storage->str)); CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->target_batch_offsets, gpu_storage->host_target_batch_offsets, actual_n_alns * sizeof(uint32_t), cudaMemcpyHostToDevice, gpu_storage->str)); //--------------------------------------------------------------------------------------------------------------- //--------------------------------------launch alignment kernels-------------------------------------------------------------- if(algo == LOCAL) { if (start == WITH_START) { gasal_local_with_start_kernel<<<N_BLOCKS, BLOCKDIM, 0, gpu_storage->str>>>(gpu_storage->packed_query_batch, 
gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->query_batch_end, gpu_storage->target_batch_end, gpu_storage->query_batch_start, gpu_storage->target_batch_start, actual_n_alns); } else { gasal_local_kernel<<<N_BLOCKS, BLOCKDIM, 0, gpu_storage->str>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->query_batch_end, gpu_storage->target_batch_end, actual_n_alns); } } else if (algo == SEMI_GLOBAL) { if (start == WITH_START) { gasal_semi_global_with_start_kernel<<<N_BLOCKS, BLOCKDIM, 0, gpu_storage->str>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->target_batch_end, gpu_storage->target_batch_start, actual_n_alns); } else { gasal_semi_global_kernel<<<N_BLOCKS, BLOCKDIM, 0, gpu_storage->str>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, gpu_storage->target_batch_end, actual_n_alns); } } else if (algo == GLOBAL) { gasal_global_kernel<<<N_BLOCKS, BLOCKDIM, 0, gpu_storage->str>>>(gpu_storage->packed_query_batch, gpu_storage->packed_target_batch, gpu_storage->query_batch_lens, gpu_storage->target_batch_lens, gpu_storage->query_batch_offsets, gpu_storage->target_batch_offsets, gpu_storage->aln_score, actual_n_alns); } else { fprintf(stderr, "[GASAL ERROR:] Algo type invalid\n"); exit(EXIT_FAILURE); } 
//----------------------------------------------------------------------------------------------------------------------- cudaError_t aln_kernel_err = cudaGetLastError(); if ( cudaSuccess != aln_kernel_err ) { fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. %d in file %s\n", cudaGetErrorString(aln_kernel_err), aln_kernel_err, __LINE__, __FILE__); exit(EXIT_FAILURE); } //------------------------launch the copying of alignment results from GPU to CPU-------------------------------------- if (gpu_storage->host_aln_score != NULL && gpu_storage->aln_score != NULL) CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->host_aln_score, gpu_storage->aln_score, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost, gpu_storage->str)); if (gpu_storage->host_query_batch_start != NULL && gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->host_query_batch_start, gpu_storage->query_batch_start, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost, gpu_storage->str)); if (gpu_storage->host_target_batch_start != NULL && gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->host_target_batch_start, gpu_storage->target_batch_start, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost, gpu_storage->str)); if (gpu_storage->host_query_batch_end != NULL && gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->host_query_batch_end, gpu_storage->query_batch_end, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost, gpu_storage->str)); if (gpu_storage->host_target_batch_end != NULL && gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(cudaMemcpyAsync(gpu_storage->host_target_batch_end, gpu_storage->target_batch_end, actual_n_alns * sizeof(int32_t), cudaMemcpyDeviceToHost, gpu_storage->str)); //----------------------------------------------------------------------------------------------------------------------- gpu_storage->is_free = 0; //set the availability of current 
stream to false } int gasal_is_aln_async_done(gasal_gpu_storage_t *gpu_storage) { cudaError_t err; if(gpu_storage->is_free == 1) return -2;//if no work is launced in this stream, return -2 err = cudaStreamQuery(gpu_storage->str);//check to see if the stream is finished if (err != cudaSuccess ) { if (err == cudaErrorNotReady) return -1; else{ fprintf(stderr, "[GASAL CUDA ERROR:] %s(CUDA error no.=%d). Line no. %d in file %s\n", cudaGetErrorString(err), err, __LINE__, __FILE__); exit(EXIT_FAILURE); } } gpu_storage->is_free = 1; return 0; } void gasal_gpu_mem_alloc(gasal_gpu_storage_t *gpu_storage, int gpu_max_query_batch_bytes, int gpu_max_target_batch_bytes, int gpu_max_n_alns, int algo, int start) { cudaError_t err; // if (gpu_storage->gpu_max_query_batch_bytes % 8) { // fprintf(stderr, "[GASAL ERROR:] max_query_batch_bytes=%d is not a multiple of 8\n", gpu_storage->gpu_max_query_batch_bytes % 8); // exit(EXIT_FAILURE); // } // if (gpu_storage->gpu_max_target_batch_bytes % 8) { // fprintf(stderr, "[GASAL ERROR:] max_target_batch_bytes=%d is not a multiple of 8\n", gpu_storage->gpu_max_target_batch_bytes % 8); // exit(EXIT_FAILURE); // } CHECKCUDAERROR(cudaMalloc(&(gpu_storage->unpacked_query_batch), gpu_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->unpacked_target_batch), gpu_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->packed_query_batch), (gpu_max_query_batch_bytes/8) * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->packed_target_batch), (gpu_max_target_batch_bytes/8) * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->query_batch_lens), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->target_batch_lens), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->query_batch_offsets), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->target_batch_offsets), gpu_max_n_alns * 
sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage->aln_score), gpu_max_n_alns * sizeof(int32_t))); if (algo == GLOBAL) { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; gpu_storage->target_batch_start = NULL; gpu_storage->target_batch_end = NULL; } else { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); if (start == WITH_START) { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->target_batch_start), gpu_max_n_alns * sizeof(uint32_t))); } else gpu_storage->target_batch_start = NULL; if (algo == LOCAL) { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->query_batch_end), gpu_max_n_alns * sizeof(uint32_t))); if (start == WITH_START) { CHECKCUDAERROR( cudaMalloc(&(gpu_storage->query_batch_start), gpu_max_n_alns * sizeof(uint32_t))); } else gpu_storage->query_batch_start = NULL; } else { gpu_storage->query_batch_start = NULL; gpu_storage->query_batch_end = NULL; } } gpu_storage->gpu_max_query_batch_bytes = gpu_max_query_batch_bytes; gpu_storage->gpu_max_target_batch_bytes = gpu_max_target_batch_bytes; gpu_storage->gpu_max_n_alns = gpu_max_n_alns; } void gasal_init_streams(gasal_gpu_storage_v *gpu_storage_vec, int host_max_query_batch_bytes, int gpu_max_query_batch_bytes, int host_max_target_batch_bytes, int gpu_max_target_batch_bytes, int host_max_n_alns, int gpu_max_n_alns, int algo, int start) { cudaError_t err; int i; for (i = 0; i < gpu_storage_vec->n; i++) { CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_unpacked_query_batch), host_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_unpacked_target_batch), host_max_target_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].unpacked_query_batch), gpu_max_query_batch_bytes * sizeof(uint8_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].unpacked_target_batch), gpu_max_target_batch_bytes * sizeof(uint8_t))); 
CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].packed_query_batch), (gpu_max_query_batch_bytes/8) * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].packed_target_batch), (gpu_max_target_batch_bytes/8) * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_query_batch_lens), host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_target_batch_lens), host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_query_batch_offsets), host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_target_batch_offsets), host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_aln_score), host_max_n_alns * sizeof(int32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].query_batch_lens), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].target_batch_lens), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].query_batch_offsets), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].target_batch_offsets), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMalloc(&(gpu_storage_vec->a[i].aln_score), gpu_max_n_alns * sizeof(int32_t))); if (algo == GLOBAL) { gpu_storage_vec->a[i].host_query_batch_start = NULL; gpu_storage_vec->a[i].host_target_batch_start = NULL; gpu_storage_vec->a[i].host_query_batch_end = NULL; gpu_storage_vec->a[i].host_target_batch_end = NULL; gpu_storage_vec->a[i].query_batch_start = NULL; gpu_storage_vec->a[i].target_batch_start = NULL; gpu_storage_vec->a[i].query_batch_end = NULL; gpu_storage_vec->a[i].target_batch_end = NULL; } else if (algo == SEMI_GLOBAL) { gpu_storage_vec->a[i].host_query_batch_start = NULL; gpu_storage_vec->a[i].host_query_batch_end = NULL; gpu_storage_vec->a[i].query_batch_start = NULL; 
gpu_storage_vec->a[i].query_batch_end = NULL; if (start == WITH_START) { CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_target_batch_start),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_target_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].target_batch_start), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); } else { CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_target_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); gpu_storage_vec->a[i].host_target_batch_start = NULL; gpu_storage_vec->a[i].target_batch_start = NULL; } } else { if (start == WITH_START) { CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_query_batch_start),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_target_batch_start),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_query_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_target_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].query_batch_start), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].target_batch_start), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].query_batch_end), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); } else { CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_query_batch_end),host_max_n_alns * sizeof(uint32_t))); 
CHECKCUDAERROR(cudaMallocHost(&(gpu_storage_vec->a[i].host_target_batch_end),host_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].query_batch_end), gpu_max_n_alns * sizeof(uint32_t))); CHECKCUDAERROR( cudaMalloc(&(gpu_storage_vec->a[i].target_batch_end), gpu_max_n_alns * sizeof(uint32_t))); gpu_storage_vec->a[i].host_query_batch_start = NULL; gpu_storage_vec->a[i].host_target_batch_start = NULL; gpu_storage_vec->a[i].query_batch_start = NULL; gpu_storage_vec->a[i].target_batch_start = NULL; } } CHECKCUDAERROR(cudaStreamCreate(&(gpu_storage_vec->a[i].str))); gpu_storage_vec->a[i].is_free = 1; gpu_storage_vec->a[i].host_max_query_batch_bytes = host_max_query_batch_bytes; gpu_storage_vec->a[i].host_max_target_batch_bytes = host_max_target_batch_bytes; gpu_storage_vec->a[i].host_max_n_alns = host_max_n_alns; gpu_storage_vec->a[i].gpu_max_query_batch_bytes = gpu_max_query_batch_bytes; gpu_storage_vec->a[i].gpu_max_target_batch_bytes = gpu_max_target_batch_bytes; gpu_storage_vec->a[i].gpu_max_n_alns = gpu_max_n_alns; } } void gasal_gpu_mem_free(gasal_gpu_storage_t *gpu_storage) { cudaError_t err; if (gpu_storage->unpacked_query_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->unpacked_query_batch)); if (gpu_storage->unpacked_target_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->unpacked_target_batch)); if (gpu_storage->packed_query_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->packed_query_batch)); if (gpu_storage->packed_target_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->packed_target_batch)); if (gpu_storage->query_batch_offsets != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_offsets)); if (gpu_storage->target_batch_offsets != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_offsets)); if (gpu_storage->query_batch_lens != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_lens)); if (gpu_storage->target_batch_lens != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_lens)); if 
(gpu_storage->aln_score != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->aln_score)); if (gpu_storage->query_batch_start != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_start)); if (gpu_storage->target_batch_start != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_start)); if (gpu_storage->query_batch_end != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->query_batch_end)); if (gpu_storage->target_batch_end != NULL) CHECKCUDAERROR(cudaFree(gpu_storage->target_batch_end)); } void gasal_destroy_streams(gasal_gpu_storage_v *gpu_storage_vec) { cudaError_t err; int i; for (i = 0; i < gpu_storage_vec->n; i ++) { if (gpu_storage_vec->a[i].host_unpacked_query_batch != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_unpacked_query_batch)); if (gpu_storage_vec->a[i].host_unpacked_target_batch != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_unpacked_target_batch)); if (gpu_storage_vec->a[i].host_query_batch_offsets != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_query_batch_offsets)); if (gpu_storage_vec->a[i].host_target_batch_offsets != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_target_batch_offsets)); if (gpu_storage_vec->a[i].host_query_batch_lens != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_query_batch_lens)); if (gpu_storage_vec->a[i].host_target_batch_lens != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_target_batch_lens)); if (gpu_storage_vec->a[i].host_aln_score != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_aln_score)); if (gpu_storage_vec->a[i].host_query_batch_start != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_query_batch_start)); if (gpu_storage_vec->a[i].host_target_batch_start != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_target_batch_start)); if (gpu_storage_vec->a[i].host_query_batch_end != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_query_batch_end)); if 
(gpu_storage_vec->a[i].host_target_batch_end != NULL) CHECKCUDAERROR(cudaFreeHost(gpu_storage_vec->a[i].host_target_batch_end)); if (gpu_storage_vec->a[i].unpacked_query_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].unpacked_query_batch)); if (gpu_storage_vec->a[i].unpacked_target_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].unpacked_target_batch)); if (gpu_storage_vec->a[i].packed_query_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].packed_query_batch)); if (gpu_storage_vec->a[i].packed_target_batch != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].packed_target_batch)); if (gpu_storage_vec->a[i].query_batch_offsets != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].query_batch_offsets)); if (gpu_storage_vec->a[i].target_batch_offsets != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].target_batch_offsets)); if (gpu_storage_vec->a[i].query_batch_lens != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].query_batch_lens)); if (gpu_storage_vec->a[i].target_batch_lens != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].target_batch_lens)); if (gpu_storage_vec->a[i].aln_score != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].aln_score)); if (gpu_storage_vec->a[i].query_batch_start != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].query_batch_start)); if (gpu_storage_vec->a[i].target_batch_start != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].target_batch_start)); if (gpu_storage_vec->a[i].query_batch_end != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].query_batch_end)); if (gpu_storage_vec->a[i].target_batch_end != NULL) CHECKCUDAERROR(cudaFree(gpu_storage_vec->a[i].target_batch_end)); if (gpu_storage_vec->a[i].str != NULL)CHECKCUDAERROR(cudaStreamDestroy(gpu_storage_vec->a[i].str)); } } void gasal_destroy_gpu_storage_v(gasal_gpu_storage_v *gpu_storage_vec) { if(gpu_storage_vec->a != NULL) free(gpu_storage_vec->a); } void gasal_copy_subst_scores(gasal_subst_scores *subst){ 
cudaError_t err; CHECKCUDAERROR(cudaMemcpyToSymbol(_cudaGapO, &(subst->gap_open), sizeof(int32_t), 0, cudaMemcpyHostToDevice)); CHECKCUDAERROR(cudaMemcpyToSymbol(_cudaGapExtend, &(subst->gap_extend), sizeof(int32_t), 0, cudaMemcpyHostToDevice)); int32_t gapoe = (subst->gap_open + subst->gap_extend); CHECKCUDAERROR(cudaMemcpyToSymbol(_cudaGapOE, &(gapoe), sizeof(int32_t), 0, cudaMemcpyHostToDevice)); CHECKCUDAERROR(cudaMemcpyToSymbol(_cudaMatchScore, &(subst->match), sizeof(int32_t), 0, cudaMemcpyHostToDevice)); CHECKCUDAERROR(cudaMemcpyToSymbol(_cudaMismatchScore, &(subst->mismatch), sizeof(int32_t), 0, cudaMemcpyHostToDevice)); return; }
011b5256b59c5193c9370725d617cb0882e4e0bd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include "Boid.h" #include "Flock.h" __global__ void kernel_calculateRoatateAngle(Boid* boids) { int index = blockIdx.x * blockDim.x + threadIdx.x; float rotateAngle = 0.0f; double pi = atan(1.0) * 4; float vec_assit[2] = { 0.0f, 0.0f }; float new_v2[2] = { 0.0f, 0.0f }; vec_assit[0] = (boids[index].tri_v0[0] + boids[index].tri_v1[0]) / 2; vec_assit[1] = (boids[index].tri_v0[1] + boids[index].tri_v1[1]) / 2; new_v2[0] = boids[index].tri_v2[0] - vec_assit[0]; new_v2[1] = boids[index].tri_v2[1] - vec_assit[1]; float product_vec = new_v2[0] * boids[index].velocity[1] - new_v2[1] * boids[index].velocity[0]; float factor = 1; if (product_vec < 0) factor = -1; float dot_product = (new_v2[0] * boids[index].velocity[0]) + (new_v2[1] * boids[index].velocity[1]); float magnitude1 = powf((powf(new_v2[0], 2) + powf(new_v2[1], 2)), 0.5); float magnitude2 = powf((powf(boids[index].velocity[0], 2) + powf(boids[index].velocity[1], 2)), 0.5); float angle_rad = factor * acos(dot_product / (magnitude1 * magnitude2)); rotateAngle = angle_rad / pi * 180; boids[index].rotate_angle = rotateAngle; } void calculateRoatateAngleCuda(Flock &flock) { int N = flock.flockSize; int flockSize = N * sizeof(Boid); Boid* d_boids; hipMalloc((void**)&d_boids, flockSize); // Copy boids from the host to the device hipMemcpy(d_boids, flock.boids, flockSize, hipMemcpyHostToDevice); // Run kernel hipLaunchKernelGGL(( kernel_calculateRoatateAngle), dim3(1), dim3(flock.flockSize), 0, 0, d_boids); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Copy boids from the device to the host hipMemcpy(flock.boids, d_boids, flockSize, hipMemcpyDeviceToHost); hipFree(d_boids); }
011b5256b59c5193c9370725d617cb0882e4e0bd.cu
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <math.h> #include "Boid.h" #include "Flock.h" __global__ void kernel_calculateRoatateAngle(Boid* boids) { int index = blockIdx.x * blockDim.x + threadIdx.x; float rotateAngle = 0.0f; double pi = atan(1.0) * 4; float vec_assit[2] = { 0.0f, 0.0f }; float new_v2[2] = { 0.0f, 0.0f }; vec_assit[0] = (boids[index].tri_v0[0] + boids[index].tri_v1[0]) / 2; vec_assit[1] = (boids[index].tri_v0[1] + boids[index].tri_v1[1]) / 2; new_v2[0] = boids[index].tri_v2[0] - vec_assit[0]; new_v2[1] = boids[index].tri_v2[1] - vec_assit[1]; float product_vec = new_v2[0] * boids[index].velocity[1] - new_v2[1] * boids[index].velocity[0]; float factor = 1; if (product_vec < 0) factor = -1; float dot_product = (new_v2[0] * boids[index].velocity[0]) + (new_v2[1] * boids[index].velocity[1]); float magnitude1 = powf((powf(new_v2[0], 2) + powf(new_v2[1], 2)), 0.5); float magnitude2 = powf((powf(boids[index].velocity[0], 2) + powf(boids[index].velocity[1], 2)), 0.5); float angle_rad = factor * acos(dot_product / (magnitude1 * magnitude2)); rotateAngle = angle_rad / pi * 180; boids[index].rotate_angle = rotateAngle; } void calculateRoatateAngleCuda(Flock &flock) { int N = flock.flockSize; int flockSize = N * sizeof(Boid); Boid* d_boids; cudaMalloc((void**)&d_boids, flockSize); // Copy boids from the host to the device cudaMemcpy(d_boids, flock.boids, flockSize, cudaMemcpyHostToDevice); // Run kernel kernel_calculateRoatateAngle<<<1, flock.flockSize>>>(d_boids); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Copy boids from the device to the host cudaMemcpy(flock.boids, d_boids, flockSize, cudaMemcpyDeviceToHost); cudaFree(d_boids); }
df0866687ad01cd0381610e8afc283144872ca72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> #include <cassert> #include <numa.h> // Add a scalar to the vector __global__ void vadd(int *const v, int const a, size_t const len) { const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x; const unsigned int gsize = gridDim.x * blockDim.x; for (size_t i = gid; i < len; i += gsize) { v[i] += a; } } int main() { // Vector length constexpr size_t LEN = 100'000; // NUMA node constexpr int NODE = 0; // GPU kernel parameters constexpr unsigned int grid = 160; constexpr unsigned int block = 1024; // Allocate vector int *data = nullptr; data = reinterpret_cast<int *>(numa_alloc_onnode(LEN * sizeof(int), NODE)); if (data == nullptr) { std::cerr << "Failed to allocate memory" << std::endl; std::exit(EXIT_FAILURE); } // Initialize vector with some data for (size_t i = 0; i < LEN; ++i) { data[i] = i; } // Call a function to do some work hipLaunchKernelGGL(( vadd), dim3(grid), dim3(block), 0, 0, data, 1, LEN); // Wait for the GPU kernel to finish execution. hipDeviceSynchronize(); // Verify that result is correct unsigned long long sum = 0; for (size_t i = 0; i < LEN; ++i) { sum += data[i]; } assert(sum == (LEN * (LEN + 1)) / 2); // Free vector numa_free(data, LEN * sizeof(int)); std::exit(EXIT_SUCCESS); }
df0866687ad01cd0381610e8afc283144872ca72.cu
#include <iostream> #include <cstdlib> #include <cassert> #include <numa.h> // Add a scalar to the vector __global__ void vadd(int *const v, int const a, size_t const len) { const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x; const unsigned int gsize = gridDim.x * blockDim.x; for (size_t i = gid; i < len; i += gsize) { v[i] += a; } } int main() { // Vector length constexpr size_t LEN = 100'000; // NUMA node constexpr int NODE = 0; // GPU kernel parameters constexpr unsigned int grid = 160; constexpr unsigned int block = 1024; // Allocate vector int *data = nullptr; data = reinterpret_cast<int *>(numa_alloc_onnode(LEN * sizeof(int), NODE)); if (data == nullptr) { std::cerr << "Failed to allocate memory" << std::endl; std::exit(EXIT_FAILURE); } // Initialize vector with some data for (size_t i = 0; i < LEN; ++i) { data[i] = i; } // Call a function to do some work vadd<<<grid, block>>>(data, 1, LEN); // Wait for the GPU kernel to finish execution. cudaDeviceSynchronize(); // Verify that result is correct unsigned long long sum = 0; for (size_t i = 0; i < LEN; ++i) { sum += data[i]; } assert(sum == (LEN * (LEN + 1)) / 2); // Free vector numa_free(data, LEN * sizeof(int)); std::exit(EXIT_SUCCESS); }
c9eec4a9e005cc74a0d462a5e08323eef82b1f5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // These are loosely adapted from libc++'s tests. In general, we don't care a // ton about verifying the return types or results we get, on the assumption // that our standard library is correct. But we care deeply about calling every // overload of every function (so that we verify that everything compiles). // // We do care about the results of complex multiplication / division, since // these use code we've written. #include <stdio.h> // These tests are pretty annoying to write without C++11, so we require that. // // In addition, these tests don't work in C++14 mode with pre-C++14 versions of // libstdc++ (compile errors in <complex>). #if __cplusplus >= 201103L && (__cplusplus < 201402L || STDLIB_VERSION >= 2014) // Support for non-fp std::complex is unspecified: // http://eel.is/c++draft/complex.numbers.general#2.sentence-1 #if defined(__GLIBCXX__) && _GLIBCXX_RELEASE >= 9 // newer versions of libstdc++ do not support implicit conversion from such // types. #undef TEST_NONFLOAT_COMPLEX #else // libc++ and the older versions of libstdc++ have better support for non-float // complex, so we can still test them. 
#define TEST_NONFLOAT_COMPLEX 1 #endif #include <assert.h> #include <complex> #include <type_traits> template <class T> __device__ double promote( T, typename std::enable_if<std::is_integral<T>::value>::type* = 0); __device__ float promote(float); __device__ double promote(double); __device__ void is_about(float x, float y) { assert(std::abs((x - y) / (x + y)) < 1.e-6); } __device__ void is_about(double x, double y) { assert(std::abs((x - y) / (x + y)) < 1.e-14); } template <class T> __device__ void test_promotion_impl(T x) { assert(std::imag(x) == 0); assert(std::real(x) == x); using Promoted = decltype(promote(x)); assert(std::arg(x) == arg(std::complex<Promoted>(x, 0))); assert(std::conj(x) == conj(std::complex<Promoted>(x, 0))); assert(std::norm(x) == norm(std::complex<Promoted>(x, 0))); #ifndef __GLIBCXX__ // libstdc++'s implementation of proj is completely broken, see // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61761. assert(std::proj(x) == proj(std::complex<Promoted>(x, 0))); #endif } __device__ void test_promotion() { int vals[] = {0, 1, 10}; for (int i : vals) { test_promotion_impl<float>(i); test_promotion_impl<double>(i); test_promotion_impl<int>(i); test_promotion_impl<unsigned>(i); test_promotion_impl<long long>(i); } } __device__ void test_literals() { #if __cplusplus >= 201402L && STDLIB_VERSION >= 2014 using namespace std::literals::complex_literals; { std::complex<double> c1 = 3.0i; assert(c1 == std::complex<double>(0, 3.0)); auto c2 = 3i; assert(c1 == c2); } { std::complex<float> c1 = 3.0if; assert(c1 == std::complex<float>(0, 3.0)); auto c2 = 3if; assert(c1 == c2); } #endif } template <class T> __device__ void test_assignment_real() { std::complex<T> c; c = 1.5; assert(c.real() == 1.5); assert(c.imag() == 0); } template <class T, class U> __device__ void test_assignment_complex() { std::complex<T> c; std::complex<T> c2(1.5, 2.5); c = c2; assert(c.real() == 1.5); assert(c.imag() == 2.5); } template <class T> __device__ void test_plus_equals() 
{ { std::complex<T> c; c += 1.5; assert(c.real() == 1.5); assert(c.imag() == 0); } { std::complex<T> c; const std::complex<T> c2(1.5, 2.5); c += c2; c += c2; assert(c.real() == 3); assert(c.imag() == 5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic(1, 1); c3 += ic; assert(c3.real() == 4); assert(c3.imag() == 6); #endif c3 = c; std::complex<float> fc(1, 1); c3 += fc; assert(c3.real() == 4); assert(c3.imag() == 6); } } template <class T> __device__ void test_minus_equals() { { std::complex<T> c; c -= 1.5; assert(c.real() == -1.5); assert(c.imag() == 0); } { std::complex<T> c; const std::complex<T> c2(1.5, 2.5); assert(c.real() == 0); assert(c.imag() == 0); c -= c2; assert(c.real() == -1.5); assert(c.imag() == -2.5); c -= c2; assert(c.real() == -3); assert(c.imag() == -5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic (1,1); c3 -= ic; assert(c3.real() == -4); assert(c3.imag() == -6); #endif c3 = c; std::complex<float> fc (1,1); c3 -= fc; assert(c3.real() == -4); assert(c3.imag() == -6); } } template <class T> __device__ void test_times_equals() { { std::complex<T> c(1); c *= 1.5; c *= 1.5; c *= -1.5; c.imag(2); c *= 1.5; assert(c.real() == -5.0625); assert(c.imag() == 3); } { std::complex<T> c(1); const std::complex<T> c2(1.5, 2.5); c *= c2; c *= c2; assert(c.real() == -4); assert(c.imag() == 7.5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic (1,1); c3 *= ic; assert(c3.real() == -11.5); assert(c3.imag() == 3.5); #endif c3 = c; std::complex<float> fc (1,1); c3 *= fc; assert(c3.real() == -11.5); assert(c3.imag() == 3.5); } } template <class T> __device__ void test_divide_equals() { { std::complex<T> c(1); c /= 0.5; c /= 0.5; c /= -0.5; c.imag(2); c /= 0.5; assert(c.real() == -16); assert(c.imag() == 4); } { std::complex<T> c(-4, 7.5); const std::complex<T> c2(1.5, 2.5); assert(c.real() == -4); assert(c.imag() == 7.5); c /= c2; assert(c.real() == 1.5); assert(c.imag() == 2.5); c /= 
c2; assert(c.real() == 1); assert(c.imag() == 0); std::complex<T> c3; c3 = c; #if TEST_NONFLOAT_COMPLEX std::complex<int> ic (1,1); c3 /= ic; assert(c3.real() == 0.5); assert(c3.imag() == -0.5); #endif c3 = c; std::complex<float> fc (1,1); c3 /= fc; assert(c3.real() == 0.5); assert(c3.imag() == -0.5); } } template <class T> __device__ void test_construct() { { const std::complex<T> c; assert(c.real() == 0); assert(c.imag() == 0); } { const std::complex<T> c = 7.5; assert(c.real() == 7.5); assert(c.imag() == 0); } { const std::complex<T> c(8.5); assert(c.real() == 8.5); assert(c.imag() == 0); } { const std::complex<T> c(10.5, -9.5); assert(c.real() == 10.5); assert(c.imag() == -9.5); } #if __cplusplus >= 201103L { constexpr std::complex<T> c; static_assert(c.real() == 0, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c = 7.5; static_assert(c.real() == 7.5, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c(8.5); static_assert(c.real() == 8.5, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c(10.5, -9.5); static_assert(c.real() == 10.5, ""); static_assert(c.imag() == -9.5, ""); } #endif } template <class T> __device__ void test_construct_integral() { #if __cplusplus >= 201402L constexpr std::complex<T> c1; static_assert(c1.real() == 0, ""); static_assert(c1.imag() == 0, ""); constexpr std::complex<T> c2(3); static_assert(c2.real() == 3, ""); static_assert(c2.imag() == 0, ""); constexpr std::complex<T> c3(3, 4); static_assert(c3.real() == 3, ""); static_assert(c3.imag() == 4, ""); #endif } template <class T> __device__ void test_set_real_imag() { std::complex<T> c; c.real(3.5); assert(c.real() == 3.5); assert(c.imag() == 0); c.imag(4.5); assert(c.real() == 3.5); assert(c.imag() == 4.5); } template <class T> __device__ void test_transcendentals_etc() { assert(sin(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(sinh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(asin(std::complex<T>(0, 0)) 
== std::complex<T>(0, 0)); assert(asinh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(cos(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); assert(cosh(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); { std::complex<T> c = acos(std::complex<T>(0, 0)); is_about(real(c), T(M_PI_2)); assert(std::abs(imag(c)) < 1.e-6); } { std::complex<T> c = acosh(std::complex<T>(0, 0)); assert(std::abs(real(c)) < 1.e-6); is_about(imag(c), T(M_PI_2)); } assert(tan(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(tanh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(atan(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(atanh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(exp(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); assert(log10(std::complex<T>(0, 0)) == std::complex<T>(-INFINITY, 0)); assert(log(std::complex<T>(0, 0)) == std::complex<T>(-INFINITY, 0)); { std::complex<T> c = pow(std::complex<T>(2, 3), std::complex<T>(2, 0)); is_about(real(c), -5); is_about(imag(c), 12); } { std::complex<T> c = pow(std::complex<T>(2, 3), T(2)); is_about(real(c), -5); is_about(imag(c), 12); } { std::complex<T> c = pow(T(2), std::complex<T>(2)); is_about(real(c), 4); assert(std::abs(imag(c)) < 1.e-6); } { std::complex<T> c = sqrt(std::complex<T>(64, 0)); is_about(real(c), 8); assert(std::abs(imag(c)) < 1.e-6); } // "etc." 
assert(abs(std::complex<T>(3, 4)) == 5); assert(norm(std::complex<T>(3, 4)) == 25); assert(arg(std::complex<T>(1, 0)) == 0); assert(conj(std::complex<T>(1, 2)) == std::complex<T>(1, -2)); assert(std::polar(T(0)) == std::complex<T>(0, 0)); assert(std::polar(T(1)) == std::complex<T>(1, 0)); assert(std::polar(T(100)) == std::complex<T>(100, 0)); assert(std::polar(T(0), T(0)) == std::complex<T>(0, 0)); assert(std::polar(T(1), T(0)) == std::complex<T>(1, 0)); assert(std::polar(T(100), T(0)) == std::complex<T>(100, 0)); #ifndef __GLIBCXX__ // libstdc++'s implementation of proj is completely broken, see // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61761. assert(std::proj(std::complex<T>(1, 2)) == std::complex<T>(1, 2)); assert(std::proj(std::complex<T>(-1, 2)) == std::complex<T>(-1, 2)); assert(std::proj(std::complex<T>(1, -2)) == std::complex<T>(1, -2)); assert(std::proj(std::complex<T>(-1, -2)) == std::complex<T>(-1, -2)); #endif } __global__ void tests() { test_promotion(); test_literals(); test_assignment_real<float>(); test_assignment_real<double>(); test_assignment_complex<float, float>(); test_assignment_complex<float, double>(); test_assignment_complex<double, float>(); test_assignment_complex<double, double>(); test_plus_equals<float>(); test_plus_equals<double>(); test_minus_equals<float>(); test_minus_equals<double>(); test_times_equals<float>(); test_times_equals<double>(); test_divide_equals<float>(); test_divide_equals<double>(); test_construct<float>(); test_construct<double>(); test_construct_integral<int>(); test_set_real_imag<float>(); test_set_real_imag<double>(); test_transcendentals_etc<float>(); test_transcendentals_etc<double>(); } #else __global__ void tests() {} #endif int main() { hipLaunchKernelGGL(( tests), dim3(1), dim3(1), 0, 0, ); hipError_t err = hipDeviceSynchronize(); if (err != hipSuccess) { printf("CUDA error %d\n", (int)err); return 1; } printf("Success!\n"); return 0; }
c9eec4a9e005cc74a0d462a5e08323eef82b1f5f.cu
//===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // These are loosely adapted from libc++'s tests. In general, we don't care a // ton about verifying the return types or results we get, on the assumption // that our standard library is correct. But we care deeply about calling every // overload of every function (so that we verify that everything compiles). // // We do care about the results of complex multiplication / division, since // these use code we've written. #include <stdio.h> // These tests are pretty annoying to write without C++11, so we require that. // // In addition, these tests don't work in C++14 mode with pre-C++14 versions of // libstdc++ (compile errors in <complex>). #if __cplusplus >= 201103L && (__cplusplus < 201402L || STDLIB_VERSION >= 2014) // Support for non-fp std::complex is unspecified: // http://eel.is/c++draft/complex.numbers.general#2.sentence-1 #if defined(__GLIBCXX__) && _GLIBCXX_RELEASE >= 9 // newer versions of libstdc++ do not support implicit conversion from such // types. #undef TEST_NONFLOAT_COMPLEX #else // libc++ and the older versions of libstdc++ have better support for non-float // complex, so we can still test them. 
#define TEST_NONFLOAT_COMPLEX 1 #endif #include <assert.h> #include <complex> #include <type_traits> template <class T> __device__ double promote( T, typename std::enable_if<std::is_integral<T>::value>::type* = 0); __device__ float promote(float); __device__ double promote(double); __device__ void is_about(float x, float y) { assert(std::abs((x - y) / (x + y)) < 1.e-6); } __device__ void is_about(double x, double y) { assert(std::abs((x - y) / (x + y)) < 1.e-14); } template <class T> __device__ void test_promotion_impl(T x) { assert(std::imag(x) == 0); assert(std::real(x) == x); using Promoted = decltype(promote(x)); assert(std::arg(x) == arg(std::complex<Promoted>(x, 0))); assert(std::conj(x) == conj(std::complex<Promoted>(x, 0))); assert(std::norm(x) == norm(std::complex<Promoted>(x, 0))); #ifndef __GLIBCXX__ // libstdc++'s implementation of proj is completely broken, see // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61761. assert(std::proj(x) == proj(std::complex<Promoted>(x, 0))); #endif } __device__ void test_promotion() { int vals[] = {0, 1, 10}; for (int i : vals) { test_promotion_impl<float>(i); test_promotion_impl<double>(i); test_promotion_impl<int>(i); test_promotion_impl<unsigned>(i); test_promotion_impl<long long>(i); } } __device__ void test_literals() { #if __cplusplus >= 201402L && STDLIB_VERSION >= 2014 using namespace std::literals::complex_literals; { std::complex<double> c1 = 3.0i; assert(c1 == std::complex<double>(0, 3.0)); auto c2 = 3i; assert(c1 == c2); } { std::complex<float> c1 = 3.0if; assert(c1 == std::complex<float>(0, 3.0)); auto c2 = 3if; assert(c1 == c2); } #endif } template <class T> __device__ void test_assignment_real() { std::complex<T> c; c = 1.5; assert(c.real() == 1.5); assert(c.imag() == 0); } template <class T, class U> __device__ void test_assignment_complex() { std::complex<T> c; std::complex<T> c2(1.5, 2.5); c = c2; assert(c.real() == 1.5); assert(c.imag() == 2.5); } template <class T> __device__ void test_plus_equals() 
{ { std::complex<T> c; c += 1.5; assert(c.real() == 1.5); assert(c.imag() == 0); } { std::complex<T> c; const std::complex<T> c2(1.5, 2.5); c += c2; c += c2; assert(c.real() == 3); assert(c.imag() == 5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic(1, 1); c3 += ic; assert(c3.real() == 4); assert(c3.imag() == 6); #endif c3 = c; std::complex<float> fc(1, 1); c3 += fc; assert(c3.real() == 4); assert(c3.imag() == 6); } } template <class T> __device__ void test_minus_equals() { { std::complex<T> c; c -= 1.5; assert(c.real() == -1.5); assert(c.imag() == 0); } { std::complex<T> c; const std::complex<T> c2(1.5, 2.5); assert(c.real() == 0); assert(c.imag() == 0); c -= c2; assert(c.real() == -1.5); assert(c.imag() == -2.5); c -= c2; assert(c.real() == -3); assert(c.imag() == -5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic (1,1); c3 -= ic; assert(c3.real() == -4); assert(c3.imag() == -6); #endif c3 = c; std::complex<float> fc (1,1); c3 -= fc; assert(c3.real() == -4); assert(c3.imag() == -6); } } template <class T> __device__ void test_times_equals() { { std::complex<T> c(1); c *= 1.5; c *= 1.5; c *= -1.5; c.imag(2); c *= 1.5; assert(c.real() == -5.0625); assert(c.imag() == 3); } { std::complex<T> c(1); const std::complex<T> c2(1.5, 2.5); c *= c2; c *= c2; assert(c.real() == -4); assert(c.imag() == 7.5); std::complex<T> c3; #if TEST_NONFLOAT_COMPLEX c3 = c; std::complex<int> ic (1,1); c3 *= ic; assert(c3.real() == -11.5); assert(c3.imag() == 3.5); #endif c3 = c; std::complex<float> fc (1,1); c3 *= fc; assert(c3.real() == -11.5); assert(c3.imag() == 3.5); } } template <class T> __device__ void test_divide_equals() { { std::complex<T> c(1); c /= 0.5; c /= 0.5; c /= -0.5; c.imag(2); c /= 0.5; assert(c.real() == -16); assert(c.imag() == 4); } { std::complex<T> c(-4, 7.5); const std::complex<T> c2(1.5, 2.5); assert(c.real() == -4); assert(c.imag() == 7.5); c /= c2; assert(c.real() == 1.5); assert(c.imag() == 2.5); c /= 
c2; assert(c.real() == 1); assert(c.imag() == 0); std::complex<T> c3; c3 = c; #if TEST_NONFLOAT_COMPLEX std::complex<int> ic (1,1); c3 /= ic; assert(c3.real() == 0.5); assert(c3.imag() == -0.5); #endif c3 = c; std::complex<float> fc (1,1); c3 /= fc; assert(c3.real() == 0.5); assert(c3.imag() == -0.5); } } template <class T> __device__ void test_construct() { { const std::complex<T> c; assert(c.real() == 0); assert(c.imag() == 0); } { const std::complex<T> c = 7.5; assert(c.real() == 7.5); assert(c.imag() == 0); } { const std::complex<T> c(8.5); assert(c.real() == 8.5); assert(c.imag() == 0); } { const std::complex<T> c(10.5, -9.5); assert(c.real() == 10.5); assert(c.imag() == -9.5); } #if __cplusplus >= 201103L { constexpr std::complex<T> c; static_assert(c.real() == 0, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c = 7.5; static_assert(c.real() == 7.5, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c(8.5); static_assert(c.real() == 8.5, ""); static_assert(c.imag() == 0, ""); } { constexpr std::complex<T> c(10.5, -9.5); static_assert(c.real() == 10.5, ""); static_assert(c.imag() == -9.5, ""); } #endif } template <class T> __device__ void test_construct_integral() { #if __cplusplus >= 201402L constexpr std::complex<T> c1; static_assert(c1.real() == 0, ""); static_assert(c1.imag() == 0, ""); constexpr std::complex<T> c2(3); static_assert(c2.real() == 3, ""); static_assert(c2.imag() == 0, ""); constexpr std::complex<T> c3(3, 4); static_assert(c3.real() == 3, ""); static_assert(c3.imag() == 4, ""); #endif } template <class T> __device__ void test_set_real_imag() { std::complex<T> c; c.real(3.5); assert(c.real() == 3.5); assert(c.imag() == 0); c.imag(4.5); assert(c.real() == 3.5); assert(c.imag() == 4.5); } template <class T> __device__ void test_transcendentals_etc() { assert(sin(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(sinh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(asin(std::complex<T>(0, 0)) 
== std::complex<T>(0, 0)); assert(asinh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(cos(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); assert(cosh(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); { std::complex<T> c = acos(std::complex<T>(0, 0)); is_about(real(c), T(M_PI_2)); assert(std::abs(imag(c)) < 1.e-6); } { std::complex<T> c = acosh(std::complex<T>(0, 0)); assert(std::abs(real(c)) < 1.e-6); is_about(imag(c), T(M_PI_2)); } assert(tan(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(tanh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(atan(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(atanh(std::complex<T>(0, 0)) == std::complex<T>(0, 0)); assert(exp(std::complex<T>(0, 0)) == std::complex<T>(1, 0)); assert(log10(std::complex<T>(0, 0)) == std::complex<T>(-INFINITY, 0)); assert(log(std::complex<T>(0, 0)) == std::complex<T>(-INFINITY, 0)); { std::complex<T> c = pow(std::complex<T>(2, 3), std::complex<T>(2, 0)); is_about(real(c), -5); is_about(imag(c), 12); } { std::complex<T> c = pow(std::complex<T>(2, 3), T(2)); is_about(real(c), -5); is_about(imag(c), 12); } { std::complex<T> c = pow(T(2), std::complex<T>(2)); is_about(real(c), 4); assert(std::abs(imag(c)) < 1.e-6); } { std::complex<T> c = sqrt(std::complex<T>(64, 0)); is_about(real(c), 8); assert(std::abs(imag(c)) < 1.e-6); } // "etc." 
assert(abs(std::complex<T>(3, 4)) == 5); assert(norm(std::complex<T>(3, 4)) == 25); assert(arg(std::complex<T>(1, 0)) == 0); assert(conj(std::complex<T>(1, 2)) == std::complex<T>(1, -2)); assert(std::polar(T(0)) == std::complex<T>(0, 0)); assert(std::polar(T(1)) == std::complex<T>(1, 0)); assert(std::polar(T(100)) == std::complex<T>(100, 0)); assert(std::polar(T(0), T(0)) == std::complex<T>(0, 0)); assert(std::polar(T(1), T(0)) == std::complex<T>(1, 0)); assert(std::polar(T(100), T(0)) == std::complex<T>(100, 0)); #ifndef __GLIBCXX__ // libstdc++'s implementation of proj is completely broken, see // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61761. assert(std::proj(std::complex<T>(1, 2)) == std::complex<T>(1, 2)); assert(std::proj(std::complex<T>(-1, 2)) == std::complex<T>(-1, 2)); assert(std::proj(std::complex<T>(1, -2)) == std::complex<T>(1, -2)); assert(std::proj(std::complex<T>(-1, -2)) == std::complex<T>(-1, -2)); #endif } __global__ void tests() { test_promotion(); test_literals(); test_assignment_real<float>(); test_assignment_real<double>(); test_assignment_complex<float, float>(); test_assignment_complex<float, double>(); test_assignment_complex<double, float>(); test_assignment_complex<double, double>(); test_plus_equals<float>(); test_plus_equals<double>(); test_minus_equals<float>(); test_minus_equals<double>(); test_times_equals<float>(); test_times_equals<double>(); test_divide_equals<float>(); test_divide_equals<double>(); test_construct<float>(); test_construct<double>(); test_construct_integral<int>(); test_set_real_imag<float>(); test_set_real_imag<double>(); test_transcendentals_etc<float>(); test_transcendentals_etc<double>(); } #else __global__ void tests() {} #endif int main() { tests<<<1, 1>>>(); cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { printf("CUDA error %d\n", (int)err); return 1; } printf("Success!\n"); return 0; }
71c889e37ae1580ddc35c1a37e9ff175dd246611.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by ivf_pq_compute_similarity_00_generate.py * * Make changes there and run in this directory: * * > python ivf_pq_compute_similarity_00_generate.py * */ #include <raft/neighbors/detail/ivf_pq_compute_similarity-inl.cuh> #include <raft/neighbors/detail/ivf_pq_fp_8bit.cuh> #define instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( \ OutT, LutT, IvfSampleFilterT) \ template auto \ raft::neighbors::ivf_pq::detail::compute_similarity_select<OutT, LutT, IvfSampleFilterT>( \ const hipDeviceProp_t& dev_props, \ bool manage_local_topk, \ int locality_hint, \ double preferred_shmem_carveout, \ uint32_t pq_bits, \ uint32_t pq_dim, \ uint32_t precomp_data_count, \ uint32_t n_queries, \ uint32_t n_probes, \ uint32_t topk) \ ->raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT>; \ \ template void \ raft::neighbors::ivf_pq::detail::compute_similarity_run<OutT, LutT, IvfSampleFilterT>( \ raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT> s, \ rmm::cuda_stream_view stream, \ uint32_t dim, \ uint32_t n_probes, \ uint32_t pq_dim, \ uint32_t n_queries, \ uint32_t queries_offset, \ raft::distance::DistanceType metric, \ raft::neighbors::ivf_pq::codebook_gen codebook_kind, \ uint32_t topk, \ uint32_t max_samples, \ const float* cluster_centers, \ const float* 
pq_centers, \ const uint8_t* const* pq_dataset, \ const uint32_t* cluster_labels, \ const uint32_t* _chunk_indices, \ const float* queries, \ const uint32_t* index_list, \ float* query_kths, \ IvfSampleFilterT sample_filter, \ LutT* lut_scores, \ OutT* _out_scores, \ uint32_t* _out_indices); #define COMMA , instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( float, raft::neighbors::ivf_pq::detail::fp_8bit<5u COMMA false>, raft::neighbors::filtering::none_ivf_sample_filter); #undef COMMA #undef instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select
71c889e37ae1580ddc35c1a37e9ff175dd246611.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * NOTE: this file is generated by ivf_pq_compute_similarity_00_generate.py * * Make changes there and run in this directory: * * > python ivf_pq_compute_similarity_00_generate.py * */ #include <raft/neighbors/detail/ivf_pq_compute_similarity-inl.cuh> #include <raft/neighbors/detail/ivf_pq_fp_8bit.cuh> #define instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( \ OutT, LutT, IvfSampleFilterT) \ template auto \ raft::neighbors::ivf_pq::detail::compute_similarity_select<OutT, LutT, IvfSampleFilterT>( \ const cudaDeviceProp& dev_props, \ bool manage_local_topk, \ int locality_hint, \ double preferred_shmem_carveout, \ uint32_t pq_bits, \ uint32_t pq_dim, \ uint32_t precomp_data_count, \ uint32_t n_queries, \ uint32_t n_probes, \ uint32_t topk) \ ->raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT>; \ \ template void \ raft::neighbors::ivf_pq::detail::compute_similarity_run<OutT, LutT, IvfSampleFilterT>( \ raft::neighbors::ivf_pq::detail::selected<OutT, LutT, IvfSampleFilterT> s, \ rmm::cuda_stream_view stream, \ uint32_t dim, \ uint32_t n_probes, \ uint32_t pq_dim, \ uint32_t n_queries, \ uint32_t queries_offset, \ raft::distance::DistanceType metric, \ raft::neighbors::ivf_pq::codebook_gen codebook_kind, \ uint32_t topk, \ uint32_t max_samples, \ const float* cluster_centers, \ const float* pq_centers, \ const uint8_t* const* pq_dataset, \ const 
uint32_t* cluster_labels, \ const uint32_t* _chunk_indices, \ const float* queries, \ const uint32_t* index_list, \ float* query_kths, \ IvfSampleFilterT sample_filter, \ LutT* lut_scores, \ OutT* _out_scores, \ uint32_t* _out_indices); #define COMMA , instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select( float, raft::neighbors::ivf_pq::detail::fp_8bit<5u COMMA false>, raft::neighbors::filtering::none_ivf_sample_filter); #undef COMMA #undef instantiate_raft_neighbors_ivf_pq_detail_compute_similarity_select
4e3ce76831dd3d2f783ec7ec35769c0a88bbe949.hip
// !!! This is a file automatically generated by hipify!!! /* Using cuSPARSE for matrix vector multplication of completed affinity */ #include <hip/hip_runtime.h> #include <hipsparse.h> #include "utils.h" int main(int argc, char *argv[]) { /*********************************************** * initialize program's input parameters * ***********************************************/ double alpha = 1; double beta = 1; double norm = 0; int bin_width = 10; hipsparseHandle_t handle = 0; hipsparseMatDescr_t descr = 0; hipsparseCreate(&handle); hipsparseCreateMatDescr(&descr); h_vec_t<int> distance_1; int num_feat_1 = atoi(argv[2]); ReadMatrix(distance_1, argv[1], num_feat_1); //#ifdef ACCELERATE // std::cout << "CUDA" << std::endl; // d_vec_t<unsigned> d_distance_1 = distance_1; //#endif h_vec_t<double> distance_2; int num_feat_2 = atoi(argv[4]); ReadMatrix(distance_2, argv[3], num_feat_2); //#ifdef ACCELERATE // d_vec_t<double> d_distance_2 = distance_2; //#endif int num_iters = 1; if (8 == argc) num_iters = atoi(argv[7]); /************************************************** * find unique values of distance1 and their indices ***************************************************/ //#ifdef ACCELERATE // d_vec_t<unsigned> d_uniq_keys = FindUniques(d_distance_1); // d_uniq_keys.erase( // remove_if(d_uniq_keys.begin(), d_uniq_keys.end(), // IsLessThan(bin_width)), // d_uniq_keys.end()); //#else std::cout << "HOST" << std::endl; h_vec_t<unsigned> uniq_keys = FindUniques(distance_1); uniq_keys.erase( remove_if(uniq_keys.begin(), uniq_keys.end(), IsLessThan(bin_width)), uniq_keys.end()); //#endif // //#ifdef ACCELERATE // d_vec_t<int> *d_keys_idcs = new d_vec_t<int>[d_uniq_keys.size()]; // for (unsigned i = 0; i < d_uniq_keys.size(); ++i) { // d_keys_idcs[i].resize(d_distance_1.size()); // } //#else h_vec_t<int> *keys_idcs = new h_vec_t<int>[uniq_keys.size()]; for (unsigned i = 0; i < uniq_keys.size(); ++i) { keys_idcs[i].resize(distance_1.size()); } //#endif 
counting_iterator<unsigned> first_idx(0); counting_iterator<unsigned> last_idx1 = first_idx + distance_1.size(); //#ifdef ACCELERATE // for (unsigned i = 0; i < d_uniq_keys.size(); ++i) { // transform(ZIP2(d_distance_1.begin(), first_idx), // ZIP2(d_distance_1.end(), last_idx), d_keys_idcs[i].begin(), // IsEqual(d_uniq_keys[i])); // // d_keys_idcs[i].erase( // remove(d_keys_idcs[i].begin(), d_keys_idcs[i].end(), -1), // d_keys_idcs[i].end()); // } //#else for (unsigned i = 0; i < uniq_keys.size(); ++i) { transform(ZIP2(distance_1.begin(), first_idx), ZIP2(distance_1.end(), last_idx1), keys_idcs[i].begin(), IsEqual(uniq_keys[i])); keys_idcs[i].erase(remove(keys_idcs[i].begin(), keys_idcs[i].end(), -1), keys_idcs[i].end()); } //#endif /*************************************************** * construct COO sparse respresentative of affinity * ***************************************************/ unsigned len_affinity_block = num_feat_2 * num_feat_2; counting_iterator<unsigned> last_idx2 = first_idx + len_affinity_block; h_vec_t<double> *h_coo_val = new h_vec_t<double>[uniq_keys.size()]; h_vec_t<int> *h_coo_row = new h_vec_t<int>[uniq_keys.size()]; h_vec_t<int> *h_coo_col = new h_vec_t<int>[uniq_keys.size()]; d_vec_t<double> *d_coo_val = new d_vec_t<double>[uniq_keys.size()]; d_vec_t<int> *d_coo_row = new d_vec_t<int>[uniq_keys.size()]; d_vec_t<int> *d_coo_col = new d_vec_t<int>[uniq_keys.size()]; d_vec_t<int> *d_csr_row = new d_vec_t<int>[uniq_keys.size()]; for (int i = 0; i < uniq_keys.size(); ++i) { int key = uniq_keys[i]; h_coo_val[i].resize(len_affinity_block); h_coo_row[i].resize(len_affinity_block); h_coo_col[i].resize(len_affinity_block); for_each( ZIP2(ZIP2(distance_2.begin(), first_idx), ZIP3(h_coo_val[i].begin(), h_coo_row[i].begin(), h_coo_col[i].begin())), ZIP2(ZIP2(distance_2.end(), last_idx2), ZIP3(h_coo_val[i].end(), h_coo_row[i].end(), h_coo_col[i].end())), createCOO(key, num_feat_2)); h_coo_val[i].erase( remove_if(h_coo_val[i].begin(), h_coo_val[i].end(), 
IsLessThan(0)), h_coo_val[i].end()); h_coo_row[i].erase( remove_if(h_coo_row[i].begin(), h_coo_row[i].end(), IsLessThan(0)), h_coo_row[i].end()); h_coo_col[i].erase( remove_if(h_coo_col[i].begin(), h_coo_col[i].end(), IsLessThan(0)), h_coo_col[i].end()); d_coo_val[i] = h_coo_val[i]; d_coo_row[i] = h_coo_row[i]; d_coo_col[i] = h_coo_col[i]; d_csr_row[i].resize(num_feat_2 + 1); hipsparseXcoo2csr(handle, raw_pointer_cast(d_coo_row[i].data()), d_coo_row[i].size(), num_feat_2, raw_pointer_cast(d_csr_row[i].data()), HIPSPARSE_INDEX_BASE_ZERO); } // make_tuple(h_coo_val, h_coo_row, h_coo_col); std::cout << "affinity" << std::endl; for (int i = 0; i < uniq_keys.size(); ++i) { std::cout << " unq keys: " << uniq_keys[i] << std::endl; std::cout << " values " << " " << "columns" << " " << "rows" << std::endl; for (int j = 0; j < h_coo_val[i].size(); ++j) { std::cout << h_coo_val[i][j] << " " << h_coo_col[i][j] << " " << h_coo_row[i][j] << std::endl; } } std::cout << std::endl; // hipsparseDestroy(handle); /****************************************************** * initialize eigen vectors * ******************************************************/ // hipsparseCreate(&handle); int len_eigen_vec = num_feat_1 * num_feat_2; d_vec_t<double> d_eigen_vec_new(len_eigen_vec); d_vec_t<double> d_eigen_vec_old(len_eigen_vec); norm = 1.0 / sqrt(len_eigen_vec); fill(d_eigen_vec_old.begin(), d_eigen_vec_old.end(), norm); /****************************************************** * compute eigen vectors * ******************************************************/ for (int iter = 0; iter < num_iters; ++iter) { // Create a stream for each operation hipStream_t *streams = (hipStream_t *)malloc(uniq_keys.size() * sizeof(hipStream_t)); for (int i = 0; i < uniq_keys.size(); i++) hipStreamCreate(&streams[i]); for (int i = 0; i < uniq_keys.size(); ++i) { hipsparseSetStream(handle, streams[i]); for (int j = 0; j < keys_idcs[i].size(); ++j) { int row = keys_idcs[i][j] / num_feat_1; int col = keys_idcs[i][j] % 
num_feat_1; hipsparseDcsrmv( handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, num_feat_2, num_feat_2, d_coo_val[i].size(), &alpha, descr, raw_pointer_cast(d_coo_val[i].data()), raw_pointer_cast(d_csr_row[i].data()), raw_pointer_cast(d_coo_col[i].data()), raw_pointer_cast(d_eigen_vec_old.data()) + col * num_feat_2, &beta, raw_pointer_cast(d_eigen_vec_new.data()) + row * num_feat_2); } } double init = 0; norm = std::sqrt(transform_reduce(d_eigen_vec_new.begin(), d_eigen_vec_new.end(), square(), init, thrust::plus<double>())); transform(d_eigen_vec_new.begin(), d_eigen_vec_new.end(), d_eigen_vec_old.begin(), division(norm)); fill(d_eigen_vec_new.begin(), d_eigen_vec_new.end(), 0); } for (int i = 0; i < d_eigen_vec_old.size(); i++) { std::cout << "d_eigen new value = " << d_eigen_vec_new[i] << " "; std::cout << "d_eigen old value = " << d_eigen_vec_old[i] << std::endl; } hipsparseDestroy(handle); return 0; }
4e3ce76831dd3d2f783ec7ec35769c0a88bbe949.cu
/* Using cuSPARSE for matrix vector multplication of completed affinity */ #include <cuda_runtime.h> #include <cusparse.h> #include "utils.h" int main(int argc, char *argv[]) { /*********************************************** * initialize program's input parameters * ***********************************************/ double alpha = 1; double beta = 1; double norm = 0; int bin_width = 10; cusparseHandle_t handle = 0; cusparseMatDescr_t descr = 0; cusparseCreate(&handle); cusparseCreateMatDescr(&descr); h_vec_t<int> distance_1; int num_feat_1 = atoi(argv[2]); ReadMatrix(distance_1, argv[1], num_feat_1); //#ifdef ACCELERATE // std::cout << "CUDA" << std::endl; // d_vec_t<unsigned> d_distance_1 = distance_1; //#endif h_vec_t<double> distance_2; int num_feat_2 = atoi(argv[4]); ReadMatrix(distance_2, argv[3], num_feat_2); //#ifdef ACCELERATE // d_vec_t<double> d_distance_2 = distance_2; //#endif int num_iters = 1; if (8 == argc) num_iters = atoi(argv[7]); /************************************************** * find unique values of distance1 and their indices ***************************************************/ //#ifdef ACCELERATE // d_vec_t<unsigned> d_uniq_keys = FindUniques(d_distance_1); // d_uniq_keys.erase( // remove_if(d_uniq_keys.begin(), d_uniq_keys.end(), // IsLessThan(bin_width)), // d_uniq_keys.end()); //#else std::cout << "HOST" << std::endl; h_vec_t<unsigned> uniq_keys = FindUniques(distance_1); uniq_keys.erase( remove_if(uniq_keys.begin(), uniq_keys.end(), IsLessThan(bin_width)), uniq_keys.end()); //#endif // //#ifdef ACCELERATE // d_vec_t<int> *d_keys_idcs = new d_vec_t<int>[d_uniq_keys.size()]; // for (unsigned i = 0; i < d_uniq_keys.size(); ++i) { // d_keys_idcs[i].resize(d_distance_1.size()); // } //#else h_vec_t<int> *keys_idcs = new h_vec_t<int>[uniq_keys.size()]; for (unsigned i = 0; i < uniq_keys.size(); ++i) { keys_idcs[i].resize(distance_1.size()); } //#endif counting_iterator<unsigned> first_idx(0); counting_iterator<unsigned> last_idx1 = first_idx 
+ distance_1.size(); //#ifdef ACCELERATE // for (unsigned i = 0; i < d_uniq_keys.size(); ++i) { // transform(ZIP2(d_distance_1.begin(), first_idx), // ZIP2(d_distance_1.end(), last_idx), d_keys_idcs[i].begin(), // IsEqual(d_uniq_keys[i])); // // d_keys_idcs[i].erase( // remove(d_keys_idcs[i].begin(), d_keys_idcs[i].end(), -1), // d_keys_idcs[i].end()); // } //#else for (unsigned i = 0; i < uniq_keys.size(); ++i) { transform(ZIP2(distance_1.begin(), first_idx), ZIP2(distance_1.end(), last_idx1), keys_idcs[i].begin(), IsEqual(uniq_keys[i])); keys_idcs[i].erase(remove(keys_idcs[i].begin(), keys_idcs[i].end(), -1), keys_idcs[i].end()); } //#endif /*************************************************** * construct COO sparse respresentative of affinity * ***************************************************/ unsigned len_affinity_block = num_feat_2 * num_feat_2; counting_iterator<unsigned> last_idx2 = first_idx + len_affinity_block; h_vec_t<double> *h_coo_val = new h_vec_t<double>[uniq_keys.size()]; h_vec_t<int> *h_coo_row = new h_vec_t<int>[uniq_keys.size()]; h_vec_t<int> *h_coo_col = new h_vec_t<int>[uniq_keys.size()]; d_vec_t<double> *d_coo_val = new d_vec_t<double>[uniq_keys.size()]; d_vec_t<int> *d_coo_row = new d_vec_t<int>[uniq_keys.size()]; d_vec_t<int> *d_coo_col = new d_vec_t<int>[uniq_keys.size()]; d_vec_t<int> *d_csr_row = new d_vec_t<int>[uniq_keys.size()]; for (int i = 0; i < uniq_keys.size(); ++i) { int key = uniq_keys[i]; h_coo_val[i].resize(len_affinity_block); h_coo_row[i].resize(len_affinity_block); h_coo_col[i].resize(len_affinity_block); for_each( ZIP2(ZIP2(distance_2.begin(), first_idx), ZIP3(h_coo_val[i].begin(), h_coo_row[i].begin(), h_coo_col[i].begin())), ZIP2(ZIP2(distance_2.end(), last_idx2), ZIP3(h_coo_val[i].end(), h_coo_row[i].end(), h_coo_col[i].end())), createCOO(key, num_feat_2)); h_coo_val[i].erase( remove_if(h_coo_val[i].begin(), h_coo_val[i].end(), IsLessThan(0)), h_coo_val[i].end()); h_coo_row[i].erase( remove_if(h_coo_row[i].begin(), 
h_coo_row[i].end(), IsLessThan(0)), h_coo_row[i].end()); h_coo_col[i].erase( remove_if(h_coo_col[i].begin(), h_coo_col[i].end(), IsLessThan(0)), h_coo_col[i].end()); d_coo_val[i] = h_coo_val[i]; d_coo_row[i] = h_coo_row[i]; d_coo_col[i] = h_coo_col[i]; d_csr_row[i].resize(num_feat_2 + 1); cusparseXcoo2csr(handle, raw_pointer_cast(d_coo_row[i].data()), d_coo_row[i].size(), num_feat_2, raw_pointer_cast(d_csr_row[i].data()), CUSPARSE_INDEX_BASE_ZERO); } // make_tuple(h_coo_val, h_coo_row, h_coo_col); std::cout << "affinity" << std::endl; for (int i = 0; i < uniq_keys.size(); ++i) { std::cout << " unq keys: " << uniq_keys[i] << std::endl; std::cout << " values " << " " << "columns" << " " << "rows" << std::endl; for (int j = 0; j < h_coo_val[i].size(); ++j) { std::cout << h_coo_val[i][j] << " " << h_coo_col[i][j] << " " << h_coo_row[i][j] << std::endl; } } std::cout << std::endl; // cusparseDestroy(handle); /****************************************************** * initialize eigen vectors * ******************************************************/ // cusparseCreate(&handle); int len_eigen_vec = num_feat_1 * num_feat_2; d_vec_t<double> d_eigen_vec_new(len_eigen_vec); d_vec_t<double> d_eigen_vec_old(len_eigen_vec); norm = 1.0 / sqrt(len_eigen_vec); fill(d_eigen_vec_old.begin(), d_eigen_vec_old.end(), norm); /****************************************************** * compute eigen vectors * ******************************************************/ for (int iter = 0; iter < num_iters; ++iter) { // Create a stream for each operation cudaStream_t *streams = (cudaStream_t *)malloc(uniq_keys.size() * sizeof(cudaStream_t)); for (int i = 0; i < uniq_keys.size(); i++) cudaStreamCreate(&streams[i]); for (int i = 0; i < uniq_keys.size(); ++i) { cusparseSetStream(handle, streams[i]); for (int j = 0; j < keys_idcs[i].size(); ++j) { int row = keys_idcs[i][j] / num_feat_1; int col = keys_idcs[i][j] % num_feat_1; cusparseDcsrmv( handle, CUSPARSE_OPERATION_NON_TRANSPOSE, num_feat_2, 
num_feat_2, d_coo_val[i].size(), &alpha, descr, raw_pointer_cast(d_coo_val[i].data()), raw_pointer_cast(d_csr_row[i].data()), raw_pointer_cast(d_coo_col[i].data()), raw_pointer_cast(d_eigen_vec_old.data()) + col * num_feat_2, &beta, raw_pointer_cast(d_eigen_vec_new.data()) + row * num_feat_2); } } double init = 0; norm = std::sqrt(transform_reduce(d_eigen_vec_new.begin(), d_eigen_vec_new.end(), square(), init, thrust::plus<double>())); transform(d_eigen_vec_new.begin(), d_eigen_vec_new.end(), d_eigen_vec_old.begin(), division(norm)); fill(d_eigen_vec_new.begin(), d_eigen_vec_new.end(), 0); } for (int i = 0; i < d_eigen_vec_old.size(); i++) { std::cout << "d_eigen new value = " << d_eigen_vec_new[i] << " "; std::cout << "d_eigen old value = " << d_eigen_vec_old[i] << std::endl; } cusparseDestroy(handle); return 0; }
d2caf82522ae01ab464cf13d5055bfa5fb2c40f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* CreateBlockIndex.cu: CUDA implementation of the CreateBlockIndex operator * * Copyright (C) 2013 Daniel Muscat * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Author's contact details can be found at http://www.danielmuscat.com * */ #include "CreateBlockIndex.h" namespace mtimager { namespace CreateBlockIndex_kernels { __global__ void create_blockDataIndx_with_support( int * not_same_support, int * not_same_support_accumulated, int * support, int no_of_records, int2 * out_blockDataIndex ); __global__ void create_blockDataIndx_without_support( int * not_same_support, int * not_same_support_accumulated, int * support, int no_of_records, int * out_blockDataIndex ); __global__ void create_blockDataIndx_with_support( int * not_same_support, int * not_same_support_accumulated, int * support, int no_of_records, int2 * out_blockDataIndex ) { int thread0Entry=blockIdx.x*blockDim.x; int myEntry=thread0Entry+threadIdx.x; if (myEntry<no_of_records) { if (not_same_support[myEntry]==1) { int index=not_same_support_accumulated[myEntry]; int newsupport=support[myEntry]; out_blockDataIndex[index].x=myEntry; out_blockDataIndex[index].y=newsupport; } } else if (myEntry==no_of_records) { int index=not_same_support_accumulated[no_of_records-1]; 
out_blockDataIndex[no_of_records].x=not_same_support_accumulated[no_of_records-1]; if (support[no_of_records-1]!=0) { out_blockDataIndex[index].x=no_of_records; out_blockDataIndex[index].y=0; } } } __global__ void create_blockDataIndx_without_support( int * not_same_support, int * not_same_support_accumulated, int * support, int no_of_records, int * out_blockDataIndex ) { int thread0Entry=blockIdx.x*blockDim.x; int myEntry=thread0Entry+threadIdx.x; if (myEntry<no_of_records) { if (not_same_support[myEntry]==1) { int index=not_same_support_accumulated[myEntry]; //int newsupport=support[myEntry]; out_blockDataIndex[index]=myEntry; } } else if (myEntry==no_of_records) { int index=not_same_support_accumulated[no_of_records-1]; out_blockDataIndex[no_of_records]=index; if (support[no_of_records-1]!=0) out_blockDataIndex[index]=no_of_records; } } }} using namespace mtimager; using namespace mtimager::CreateBlockIndex_kernels; void CreateBlockIndex::submitToGPU(GAFW::GPU::GPUSubmissionData &data) { dim3 threadsPerBlock; dim3 blocks; int records=data.inputs[0].dim.getNoOfColumns(); threadsPerBlock.x=1024; threadsPerBlock.y=1; threadsPerBlock.z=1; blocks.x=records/threadsPerBlock.x; blocks.x++; blocks.y=1; blocks.z=1; //Inputs //int * not_same_support, // int * not_same_support_accumulated, // int no_of_records, //outputs // int * out_blockDataIndex checkCudaError(hipEventRecord(*data.startEvent,data.stream),"Unable to record event"); if (params.getBoolProperty("with_support")==true) { hipError_t err=hipMemsetAsync(data.outputs[0].pointer,0,(records+1)*sizeof(int)*2,data.stream); if (err!=hipSuccess) throw CudaException("Error with zerofying outputs",err); hipLaunchKernelGGL(( create_blockDataIndx_with_support), dim3(blocks),dim3(threadsPerBlock),0,data.stream, (int*)data.inputs[0].pointer, (int*)data.inputs[1].pointer, (int*)data.inputs[2].pointer, records, (int2*)data.outputs[0].pointer ); } else { hipError_t 
err=hipMemsetAsync(data.outputs[0].pointer,0,(records+1)*sizeof(int),data.stream); if (err!=hipSuccess) throw CudaException("Error with zerofying outputs",err); hipLaunchKernelGGL(( create_blockDataIndx_without_support), dim3(blocks),dim3(threadsPerBlock),0,data.stream, (int*)data.inputs[0].pointer, (int*)data.inputs[1].pointer, (int*)data.inputs[2].pointer, records, (int*)data.outputs[0].pointer ); } checkCudaError(hipEventRecord(*data.endEvent,data.stream),"Unable to record event"); data.endEventRecorded=true; }
d2caf82522ae01ab464cf13d5055bfa5fb2c40f9.cu
/* CreateBlockIndex.cu: CUDA implementation of the CreateBlockIndex operator * * Copyright (C) 2013 Daniel Muscat * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Author's contact details can be found at http://www.danielmuscat.com * */ #include "CreateBlockIndex.h" namespace mtimager { namespace CreateBlockIndex_kernels { __global__ void create_blockDataIndx_with_support( int * not_same_support, int * not_same_support_accumulated, int * support, int no_of_records, int2 * out_blockDataIndex ); __global__ void create_blockDataIndx_without_support( int * not_same_support, int * not_same_support_accumulated, int * support, int no_of_records, int * out_blockDataIndex ); __global__ void create_blockDataIndx_with_support( int * not_same_support, int * not_same_support_accumulated, int * support, int no_of_records, int2 * out_blockDataIndex ) { int thread0Entry=blockIdx.x*blockDim.x; int myEntry=thread0Entry+threadIdx.x; if (myEntry<no_of_records) { if (not_same_support[myEntry]==1) { int index=not_same_support_accumulated[myEntry]; int newsupport=support[myEntry]; out_blockDataIndex[index].x=myEntry; out_blockDataIndex[index].y=newsupport; } } else if (myEntry==no_of_records) { int index=not_same_support_accumulated[no_of_records-1]; out_blockDataIndex[no_of_records].x=not_same_support_accumulated[no_of_records-1]; if (support[no_of_records-1]!=0) { out_blockDataIndex[index].x=no_of_records; 
out_blockDataIndex[index].y=0; } } } __global__ void create_blockDataIndx_without_support( int * not_same_support, int * not_same_support_accumulated, int * support, int no_of_records, int * out_blockDataIndex ) { int thread0Entry=blockIdx.x*blockDim.x; int myEntry=thread0Entry+threadIdx.x; if (myEntry<no_of_records) { if (not_same_support[myEntry]==1) { int index=not_same_support_accumulated[myEntry]; //int newsupport=support[myEntry]; out_blockDataIndex[index]=myEntry; } } else if (myEntry==no_of_records) { int index=not_same_support_accumulated[no_of_records-1]; out_blockDataIndex[no_of_records]=index; if (support[no_of_records-1]!=0) out_blockDataIndex[index]=no_of_records; } } }} using namespace mtimager; using namespace mtimager::CreateBlockIndex_kernels; void CreateBlockIndex::submitToGPU(GAFW::GPU::GPUSubmissionData &data) { dim3 threadsPerBlock; dim3 blocks; int records=data.inputs[0].dim.getNoOfColumns(); threadsPerBlock.x=1024; threadsPerBlock.y=1; threadsPerBlock.z=1; blocks.x=records/threadsPerBlock.x; blocks.x++; blocks.y=1; blocks.z=1; //Inputs //int * not_same_support, // int * not_same_support_accumulated, // int no_of_records, //outputs // int * out_blockDataIndex checkCudaError(cudaEventRecord(*data.startEvent,data.stream),"Unable to record event"); if (params.getBoolProperty("with_support")==true) { cudaError_t err=cudaMemsetAsync(data.outputs[0].pointer,0,(records+1)*sizeof(int)*2,data.stream); if (err!=cudaSuccess) throw CudaException("Error with zerofying outputs",err); create_blockDataIndx_with_support<<<blocks,threadsPerBlock,0,data.stream>>>( (int*)data.inputs[0].pointer, (int*)data.inputs[1].pointer, (int*)data.inputs[2].pointer, records, (int2*)data.outputs[0].pointer ); } else { cudaError_t err=cudaMemsetAsync(data.outputs[0].pointer,0,(records+1)*sizeof(int),data.stream); if (err!=cudaSuccess) throw CudaException("Error with zerofying outputs",err); create_blockDataIndx_without_support<<<blocks,threadsPerBlock,0,data.stream>>>( 
(int*)data.inputs[0].pointer, (int*)data.inputs[1].pointer, (int*)data.inputs[2].pointer, records, (int*)data.outputs[0].pointer ); } checkCudaError(cudaEventRecord(*data.endEvent,data.stream),"Unable to record event"); data.endEventRecorded=true; }
d385b6422d850f2bfd869ea13d9b66bac1b49df7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: dnlebard #include "HarmonicImproperForceGPU.cuh" #include "hoomd/TextureTools.h" #include <assert.h> // SMALL a relatively small number #define SMALL Scalar(0.001) /*! \file HarmonicImproperForceGPU.cu \brief Defines GPU kernel code for calculating the harmonic improper forces. Used by HarmonicImproperForceComputeGPU. */ //! Texture for reading improper parameters scalar2_tex_t improper_params_tex; //! Kernel for caculating harmonic improper forces on the GPU /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial \param N number of particles \param d_pos Device memory of particle positions \param d_params Force field parameters \param box Box dimensions for periodic boundary condition handling \param tlist Improper data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom */ extern "C" __global__ void gpu_compute_harmonic_improper_forces_kernel(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, unsigned int N, const Scalar4 *d_pos, const Scalar2 *d_params, BoxDim box, const group_storage<4> *tlist, const unsigned int *dihedral_ABCD, const unsigned int pitch, const unsigned int *n_dihedrals_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_impropers = n_dihedrals_list[idx]; // read in the position of our b-particle from the a-b-c triplet. 
(MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c-d quartet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 pos_a,pos_b,pos_c, pos_d; // allocate space for the a,b, and c atoms in the a-b-c-d quartet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); // initialize the virial to 0 Scalar virial_idx[6]; for (int i = 0; i < 6; i++) virial_idx[i] = Scalar(0.0); // loop over all impropers for (int improper_idx = 0; improper_idx < n_impropers; improper_idx++) { group_storage<4> cur_improper = tlist[pitch*improper_idx + idx]; unsigned int cur_ABCD = dihedral_ABCD[pitch*improper_idx + idx]; int cur_improper_x_idx = cur_improper.idx[0]; int cur_improper_y_idx = cur_improper.idx[1]; int cur_improper_z_idx = cur_improper.idx[2]; int cur_improper_type = cur_improper.idx[3]; int cur_improper_abcd = cur_ABCD; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_improper_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_improper_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 z_postype = d_pos[cur_improper_z_idx]; Scalar3 z_pos = make_scalar3(z_postype.x, z_postype.y, z_postype.z); if (cur_improper_abcd == 0) { pos_a = idx_pos; pos_b = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_improper_abcd == 1) { pos_b = idx_pos; pos_a = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_improper_abcd == 2) { pos_c = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_d = z_pos; } if (cur_improper_abcd == 3) { pos_d = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_c = z_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = pos_a - pos_b; Scalar3 dcb = pos_c - pos_b; Scalar3 ddc = pos_d - pos_c; dab = 
box.minImage(dab); dcb = box.minImage(dcb); ddc = box.minImage(ddc); // get the improper parameters (MEM TRANSFER: 12 bytes) Scalar2 params = texFetchScalar2(d_params, improper_params_tex, cur_improper_type); Scalar K = params.x; Scalar chi = params.y; Scalar r1 = rsqrtf(dot(dab, dab)); Scalar r2 = rsqrtf(dot(dcb, dcb)); Scalar r3 = rsqrtf(dot(ddc, ddc)); Scalar ss1 = r1 * r1; Scalar ss2 = r2 * r2; Scalar ss3 = r3 * r3; // Cosine and Sin of the angle between the planes Scalar c0 = dot(dab, ddc) * r1 * r3; Scalar c1 = dot(dab, dcb) * r1 * r2; Scalar c2 = -dot(ddc, dcb) * r3 * r2; Scalar s1 = Scalar(1.0) - c1*c1; if (s1 < SMALL) s1 = SMALL; s1 = Scalar(1.0) / s1; Scalar s2 = Scalar(1.0) - c2*c2; if (s2 < SMALL) s2 = SMALL; s2 = Scalar(1.0) / s2; Scalar s12 = sqrtf(s1*s2); Scalar c = (c1*c2 + c0) * s12; if (c > Scalar(1.0)) c = Scalar(1.0); if (c < -Scalar(1.0)) c = -Scalar(1.0); Scalar s = sqrtf(Scalar(1.0) - c*c); if (s < SMALL) s = SMALL; Scalar domega = fast::acos(c) - chi; Scalar a = K * domega; // calculate the energy, 1/4th for each atom //Scalar improper_eng = 0.25*a*domega; Scalar improper_eng = Scalar(0.125)*a*domega; // the .125 term is 1/2 * 1/4 //a = -a * 2.0/s; a = -a /s; // the missing 2.0 factor is to ensure K/2 is factored in for the forces c = c * a; s12 = s12 * a; Scalar a11 = c*ss1*s1; Scalar a22 = -ss2 * (Scalar(2.0)*c0*s12 - c*(s1+s2)); Scalar a33 = c*ss3*s2; Scalar a12 = -r1*r2*(c1*c*s1 + c2*s12); Scalar a13 = -r1*r3*s12; Scalar a23 = r2*r3*(c2*c*s2 + c1*s12); Scalar sx2 = a22*dcb.x + a23*ddc.x + a12*dab.x; Scalar sy2 = a22*dcb.y + a23*ddc.y + a12*dab.y; Scalar sz2 = a22*dcb.z + a23*ddc.z + a12*dab.z; // calculate the forces for each particle Scalar ffax = a12*dcb.x + a13*ddc.x + a11*dab.x; Scalar ffay = a12*dcb.y + a13*ddc.y + a11*dab.y; Scalar ffaz = a12*dcb.z + a13*ddc.z + a11*dab.z; Scalar ffbx = -sx2 - ffax; Scalar ffby = -sy2 - ffay; Scalar ffbz = -sz2 - ffaz; Scalar ffdx = a23*dcb.x + a33*ddc.x + a13*dab.x; Scalar ffdy = a23*dcb.y + 
a33*ddc.y + a13*dab.y; Scalar ffdz = a23*dcb.z + a33*ddc.z + a13*dab.z; Scalar ffcx = sx2 - ffdx; Scalar ffcy = sy2 - ffdy; Scalar ffcz = sz2 - ffdz; // and calculate the virial (upper triangular version) Scalar improper_virial[6]; improper_virial[0] = Scalar(1./4.)*(dab.x*ffax + dcb.x*ffcx + (ddc.x+dcb.x)*ffdx); improper_virial[1] = Scalar(1./4.)*(dab.y*ffax + dcb.y*ffcx + (ddc.y+dcb.y)*ffdx); improper_virial[2] = Scalar(1./4.)*(dab.z*ffax + dcb.z*ffcx + (ddc.z+dcb.z)*ffdx); improper_virial[3] = Scalar(1./4.)*(dab.y*ffay + dcb.y*ffcy + (ddc.y+dcb.y)*ffdy); improper_virial[4] = Scalar(1./4.)*(dab.z*ffay + dcb.z*ffcy + (ddc.z+dcb.z)*ffdy); improper_virial[5] = Scalar(1./4.)*(dab.z*ffaz + dcb.z*ffcz + (ddc.z+dcb.z)*ffdz); if (cur_improper_abcd == 0) { force_idx.x += ffax; force_idx.y += ffay; force_idx.z += ffaz; } if (cur_improper_abcd == 1) { force_idx.x += ffbx; force_idx.y += ffby; force_idx.z += ffbz; } if (cur_improper_abcd == 2) { force_idx.x += ffcx; force_idx.y += ffcy; force_idx.z += ffcz; } if (cur_improper_abcd == 3) { force_idx.x += ffdx; force_idx.y += ffdy; force_idx.z += ffdz; } force_idx.w += improper_eng; for (int k = 0; k < 6; k++) virial_idx[k] += improper_virial[k]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int k = 0; k < 6; k++) d_virial[k*virial_pitch+idx] = virial_idx[k]; } /*! 
\param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the device \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom \param d_params K, sign,multiplicity params packed as padded Scalar4 variables \param n_improper_types Number of improper types in d_params \param block_size Block size to use when performing calculations \param copute_capability Compute capability of the device (200, 300, 350, ...) \returns Any error code resulting from the kernel launch \note Always returns hipSuccess in release builds to avoid the hipDeviceSynchronize() \a d_params should include one Scalar4 element per improper type. The x component contains K the spring constant and the y component contains sign, and the z component the multiplicity. 
*/ hipError_t gpu_compute_harmonic_improper_forces(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const BoxDim& box, const group_storage<4> *tlist, const unsigned int *dihedral_ABCD, const unsigned int pitch, const unsigned int *n_dihedrals_list, Scalar2 *d_params, unsigned int n_improper_types, int block_size, const unsigned int compute_capability) { assert(d_params); if (N == 0) return hipSuccess; static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_improper_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid( N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // bind the texture on pre sm35 devices if (compute_capability < 350) { hipError_t error = hipBindTexture(0, improper_params_tex, d_params, sizeof(Scalar2) * n_improper_types); if (error != hipSuccess) return error; } // run the kernel hipLaunchKernelGGL(( gpu_compute_harmonic_improper_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, d_pos, d_params, box, tlist, dihedral_ABCD, pitch, n_dihedrals_list); return hipSuccess; }
d385b6422d850f2bfd869ea13d9b66bac1b49df7.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: dnlebard #include "HarmonicImproperForceGPU.cuh" #include "hoomd/TextureTools.h" #include <assert.h> // SMALL a relatively small number #define SMALL Scalar(0.001) /*! \file HarmonicImproperForceGPU.cu \brief Defines GPU kernel code for calculating the harmonic improper forces. Used by HarmonicImproperForceComputeGPU. */ //! Texture for reading improper parameters scalar2_tex_t improper_params_tex; //! Kernel for caculating harmonic improper forces on the GPU /*! \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial \param N number of particles \param d_pos Device memory of particle positions \param d_params Force field parameters \param box Box dimensions for periodic boundary condition handling \param tlist Improper data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom */ extern "C" __global__ void gpu_compute_harmonic_improper_forces_kernel(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, unsigned int N, const Scalar4 *d_pos, const Scalar2 *d_params, BoxDim box, const group_storage<4> *tlist, const unsigned int *dihedral_ABCD, const unsigned int pitch, const unsigned int *n_dihedrals_list) { // start by identifying which particle we are to handle int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_impropers = n_dihedrals_list[idx]; // read in the position of our b-particle from the a-b-c triplet. 
(MEM TRANSFER: 16 bytes) Scalar4 idx_postype = d_pos[idx]; // we can be either a, b, or c in the a-b-c-d quartet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 pos_a,pos_b,pos_c, pos_d; // allocate space for the a,b, and c atoms in the a-b-c-d quartet // initialize the force to 0 Scalar4 force_idx = make_scalar4(Scalar(0.0), Scalar(0.0), Scalar(0.0), Scalar(0.0)); // initialize the virial to 0 Scalar virial_idx[6]; for (int i = 0; i < 6; i++) virial_idx[i] = Scalar(0.0); // loop over all impropers for (int improper_idx = 0; improper_idx < n_impropers; improper_idx++) { group_storage<4> cur_improper = tlist[pitch*improper_idx + idx]; unsigned int cur_ABCD = dihedral_ABCD[pitch*improper_idx + idx]; int cur_improper_x_idx = cur_improper.idx[0]; int cur_improper_y_idx = cur_improper.idx[1]; int cur_improper_z_idx = cur_improper.idx[2]; int cur_improper_type = cur_improper.idx[3]; int cur_improper_abcd = cur_ABCD; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = d_pos[cur_improper_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = d_pos[cur_improper_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 z_postype = d_pos[cur_improper_z_idx]; Scalar3 z_pos = make_scalar3(z_postype.x, z_postype.y, z_postype.z); if (cur_improper_abcd == 0) { pos_a = idx_pos; pos_b = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_improper_abcd == 1) { pos_b = idx_pos; pos_a = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_improper_abcd == 2) { pos_c = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_d = z_pos; } if (cur_improper_abcd == 3) { pos_d = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_c = z_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = pos_a - pos_b; Scalar3 dcb = pos_c - pos_b; Scalar3 ddc = pos_d - pos_c; dab = 
box.minImage(dab); dcb = box.minImage(dcb); ddc = box.minImage(ddc); // get the improper parameters (MEM TRANSFER: 12 bytes) Scalar2 params = texFetchScalar2(d_params, improper_params_tex, cur_improper_type); Scalar K = params.x; Scalar chi = params.y; Scalar r1 = rsqrtf(dot(dab, dab)); Scalar r2 = rsqrtf(dot(dcb, dcb)); Scalar r3 = rsqrtf(dot(ddc, ddc)); Scalar ss1 = r1 * r1; Scalar ss2 = r2 * r2; Scalar ss3 = r3 * r3; // Cosine and Sin of the angle between the planes Scalar c0 = dot(dab, ddc) * r1 * r3; Scalar c1 = dot(dab, dcb) * r1 * r2; Scalar c2 = -dot(ddc, dcb) * r3 * r2; Scalar s1 = Scalar(1.0) - c1*c1; if (s1 < SMALL) s1 = SMALL; s1 = Scalar(1.0) / s1; Scalar s2 = Scalar(1.0) - c2*c2; if (s2 < SMALL) s2 = SMALL; s2 = Scalar(1.0) / s2; Scalar s12 = sqrtf(s1*s2); Scalar c = (c1*c2 + c0) * s12; if (c > Scalar(1.0)) c = Scalar(1.0); if (c < -Scalar(1.0)) c = -Scalar(1.0); Scalar s = sqrtf(Scalar(1.0) - c*c); if (s < SMALL) s = SMALL; Scalar domega = fast::acos(c) - chi; Scalar a = K * domega; // calculate the energy, 1/4th for each atom //Scalar improper_eng = 0.25*a*domega; Scalar improper_eng = Scalar(0.125)*a*domega; // the .125 term is 1/2 * 1/4 //a = -a * 2.0/s; a = -a /s; // the missing 2.0 factor is to ensure K/2 is factored in for the forces c = c * a; s12 = s12 * a; Scalar a11 = c*ss1*s1; Scalar a22 = -ss2 * (Scalar(2.0)*c0*s12 - c*(s1+s2)); Scalar a33 = c*ss3*s2; Scalar a12 = -r1*r2*(c1*c*s1 + c2*s12); Scalar a13 = -r1*r3*s12; Scalar a23 = r2*r3*(c2*c*s2 + c1*s12); Scalar sx2 = a22*dcb.x + a23*ddc.x + a12*dab.x; Scalar sy2 = a22*dcb.y + a23*ddc.y + a12*dab.y; Scalar sz2 = a22*dcb.z + a23*ddc.z + a12*dab.z; // calculate the forces for each particle Scalar ffax = a12*dcb.x + a13*ddc.x + a11*dab.x; Scalar ffay = a12*dcb.y + a13*ddc.y + a11*dab.y; Scalar ffaz = a12*dcb.z + a13*ddc.z + a11*dab.z; Scalar ffbx = -sx2 - ffax; Scalar ffby = -sy2 - ffay; Scalar ffbz = -sz2 - ffaz; Scalar ffdx = a23*dcb.x + a33*ddc.x + a13*dab.x; Scalar ffdy = a23*dcb.y + 
a33*ddc.y + a13*dab.y; Scalar ffdz = a23*dcb.z + a33*ddc.z + a13*dab.z; Scalar ffcx = sx2 - ffdx; Scalar ffcy = sy2 - ffdy; Scalar ffcz = sz2 - ffdz; // and calculate the virial (upper triangular version) Scalar improper_virial[6]; improper_virial[0] = Scalar(1./4.)*(dab.x*ffax + dcb.x*ffcx + (ddc.x+dcb.x)*ffdx); improper_virial[1] = Scalar(1./4.)*(dab.y*ffax + dcb.y*ffcx + (ddc.y+dcb.y)*ffdx); improper_virial[2] = Scalar(1./4.)*(dab.z*ffax + dcb.z*ffcx + (ddc.z+dcb.z)*ffdx); improper_virial[3] = Scalar(1./4.)*(dab.y*ffay + dcb.y*ffcy + (ddc.y+dcb.y)*ffdy); improper_virial[4] = Scalar(1./4.)*(dab.z*ffay + dcb.z*ffcy + (ddc.z+dcb.z)*ffdy); improper_virial[5] = Scalar(1./4.)*(dab.z*ffaz + dcb.z*ffcz + (ddc.z+dcb.z)*ffdz); if (cur_improper_abcd == 0) { force_idx.x += ffax; force_idx.y += ffay; force_idx.z += ffaz; } if (cur_improper_abcd == 1) { force_idx.x += ffbx; force_idx.y += ffby; force_idx.z += ffbz; } if (cur_improper_abcd == 2) { force_idx.x += ffcx; force_idx.y += ffcy; force_idx.z += ffcz; } if (cur_improper_abcd == 3) { force_idx.x += ffdx; force_idx.y += ffdy; force_idx.z += ffdz; } force_idx.w += improper_eng; for (int k = 0; k < 6; k++) virial_idx[k] += improper_virial[k]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int k = 0; k < 6; k++) d_virial[k*virial_pitch+idx] = virial_idx[k]; } /*! 
\param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param d_pos particle positions on the device \param box Box dimensions (in GPU format) to use for periodic boundary conditions \param tlist Dihedral data to use in calculating the forces \param dihedral_ABCD List of relative atom positions in the dihedrals \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals per atom \param d_params K, sign,multiplicity params packed as padded Scalar4 variables \param n_improper_types Number of improper types in d_params \param block_size Block size to use when performing calculations \param copute_capability Compute capability of the device (200, 300, 350, ...) \returns Any error code resulting from the kernel launch \note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize() \a d_params should include one Scalar4 element per improper type. The x component contains K the spring constant and the y component contains sign, and the z component the multiplicity. 
*/ cudaError_t gpu_compute_harmonic_improper_forces(Scalar4* d_force, Scalar* d_virial, const unsigned int virial_pitch, const unsigned int N, const Scalar4 *d_pos, const BoxDim& box, const group_storage<4> *tlist, const unsigned int *dihedral_ABCD, const unsigned int pitch, const unsigned int *n_dihedrals_list, Scalar2 *d_params, unsigned int n_improper_types, int block_size, const unsigned int compute_capability) { assert(d_params); if (N == 0) return cudaSuccess; static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_compute_harmonic_improper_forces_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid( N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); // bind the texture on pre sm35 devices if (compute_capability < 350) { cudaError_t error = cudaBindTexture(0, improper_params_tex, d_params, sizeof(Scalar2) * n_improper_types); if (error != cudaSuccess) return error; } // run the kernel gpu_compute_harmonic_improper_forces_kernel<<< grid, threads>>>(d_force, d_virial, virial_pitch, N, d_pos, d_params, box, tlist, dihedral_ABCD, pitch, n_dihedrals_list); return cudaSuccess; }
ae63defd469415db62ad32a41355ea6226b9fa63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11) { if (comp <= +0.0f + var_2 + (-1.5650E-42f - (+1.1859E-37f / var_3 / var_4))) { float tmp_1 = +1.2615E26f; comp = tmp_1 - (var_5 / var_6 * cosf(var_7 - var_8)); comp += +1.8291E35f * (var_9 - -1.3872E-2f - var_10 - +1.5791E-37f); for (int i=0; i < var_1; ++i) { comp += atan2f((+1.0602E-42f - -1.1852E35f - +1.5031E34f * +1.3088E-41f), (-0.0f * var_11 / -1.9901E36f)); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12); hipDeviceSynchronize(); return 0; }
ae63defd469415db62ad32a41355ea6226b9fa63.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11) { if (comp <= +0.0f + var_2 + (-1.5650E-42f - (+1.1859E-37f / var_3 / var_4))) { float tmp_1 = +1.2615E26f; comp = tmp_1 - (var_5 / var_6 * cosf(var_7 - var_8)); comp += +1.8291E35f * (var_9 - -1.3872E-2f - var_10 - +1.5791E-37f); for (int i=0; i < var_1; ++i) { comp += atan2f((+1.0602E-42f - -1.1852E35f - +1.5031E34f * +1.3088E-41f), (-0.0f * var_11 / -1.9901E36f)); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12); cudaDeviceSynchronize(); return 0; }
155e3f6b087cb89c00c6eda1e57ba87eb6a0c56f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. 
for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ hipComplex tans(hipComplex m) { 
return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn 
= enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex 
Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, 
hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the 
hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 10; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex 
spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<20;v++) { cue = cue - conj(q*hilva(conj(cue)+aon*fixon)*uon)/ conj(hilva(conj(cue)+uon*fixon)*aon); /*accume = accume + urigo(powc(cue * aon - conj(cue*uon),aon),uon*fixon);*/ } double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - 
aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
155e3f6b087cb89c00c6eda1e57ba87eb6a0c56f.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. 
for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex 
moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; 
cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex 
thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex 
frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale = 10; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); 
cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<20;v++) { cue = cue - conj(q*hilva(conj(cue)+aon*fixon)*uon)/ conj(hilva(conj(cue)+uon*fixon)*aon); /*accume = accume + urigo(powc(cue * aon - conj(cue*uon),aon),uon*fixon);*/ } double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - 
(expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
70888c6c7eaa50413ec8cb5102d0c647d46f14f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> using namespace std; #define BLOCK_SIZE 1024 #define INF 1e9 #define CEIL(a, b) ((a-1)/b +1) __global__ void edge_parallel_bfs(int *d, int *F, int *C, int n, int m, int *depth) { int id = threadIdx.x; for(int i = id; i < n; i+=blockDim.x) { d[i] = INF; } __shared__ int current_depth; __shared__ int done; if(id == 0) { current_depth = 0; done = false; d[0] = 0; } __syncthreads(); while(!done) { if(id == 0) done = true; __syncthreads(); for(int i = id; i < 2*m; i += blockDim.x) { if(d[F[i]] == current_depth) { //int v = F[i]; int u = C[i]; if(d[u] == (int)1e9) { done = false; d[u] = current_depth+1; } } } if(id == 0 && done == 0) { current_depth++; } __syncthreads(); } if(id == 0) *depth = current_depth; } int main(int argc, char *argv[]) { if(argc < 3) { cout<<"Expecting a file as command line arguement..."; return 0; } freopen(argv[1], "r", stdin); int n,m; cin>>n>>m; int *h_R = (int*) malloc((n+1)*sizeof(int)); for(int i = 0; i <= n; i++) { cin>>h_R[i]; } int *h_F = (int*) malloc(2*m*sizeof(int)); for(int i = 0; i < n; i++) { for(int j = h_R[i]; j < h_R[i+1]; j++) { h_F[j] = i; } } int *h_C = (int*) malloc(2*m*sizeof(int)); for(int i = 0; i < 2*m; i++) { cin>>h_C[i]; } int *d_F, *d_C, *d_d, *d_depth; hipMalloc((void**) &d_F, 2*m*sizeof(int)); hipMalloc((void**) &d_C, 2*m*sizeof(int)); hipMalloc((void**) &d_d, n*sizeof(int)); hipMalloc((void**) &d_depth, sizeof(int)); hipMemcpy(d_F, h_F, 2*m*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_C, h_C, 2*m*sizeof(int), hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( edge_parallel_bfs), dim3(1), dim3(BLOCK_SIZE), 0, 0, d_d, d_F, d_C, n, m, d_depth); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout<<"Compute time in GPU: 
"<<milliseconds<<"ms"<<endl; int *h_d = (int*) malloc(n*sizeof(int)); int *h_depth = (int*) malloc(sizeof(int)); hipMemcpy(h_d, d_d, n*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(h_depth, d_depth, sizeof(int), hipMemcpyDeviceToHost); int *h_check_d = (int*)malloc(n*sizeof(int)); freopen(argv[2], "r", stdin); for(int i = 0; i < n; i++) { cin>>h_check_d[i]; } bool flag = true; int count = 0; const int errcount = 20; for(int i = 0; i < n; i++) { if(h_d[i] != h_check_d[i]) { flag = false; if(count < errcount) { cout<<i<<" : "<<h_d[i]<<" "<<h_check_d[i]<<endl; } count++; } } if(flag) { cout<<"Solution is correct!"<<endl; cout<<"The depth of the given graph from node 0 is "<<(*h_depth)<<endl; } else { cout<<"Solution is incorrect!"<<endl; cout<<count<<" testcases failed."<<endl; } return 0; }
70888c6c7eaa50413ec8cb5102d0c647d46f14f6.cu
#include <iostream> #include <cstdlib> using namespace std; #define BLOCK_SIZE 1024 #define INF 1e9 #define CEIL(a, b) ((a-1)/b +1) __global__ void edge_parallel_bfs(int *d, int *F, int *C, int n, int m, int *depth) { int id = threadIdx.x; for(int i = id; i < n; i+=blockDim.x) { d[i] = INF; } __shared__ int current_depth; __shared__ int done; if(id == 0) { current_depth = 0; done = false; d[0] = 0; } __syncthreads(); while(!done) { if(id == 0) done = true; __syncthreads(); for(int i = id; i < 2*m; i += blockDim.x) { if(d[F[i]] == current_depth) { //int v = F[i]; int u = C[i]; if(d[u] == (int)1e9) { done = false; d[u] = current_depth+1; } } } if(id == 0 && done == 0) { current_depth++; } __syncthreads(); } if(id == 0) *depth = current_depth; } int main(int argc, char *argv[]) { if(argc < 3) { cout<<"Expecting a file as command line arguement..."; return 0; } freopen(argv[1], "r", stdin); int n,m; cin>>n>>m; int *h_R = (int*) malloc((n+1)*sizeof(int)); for(int i = 0; i <= n; i++) { cin>>h_R[i]; } int *h_F = (int*) malloc(2*m*sizeof(int)); for(int i = 0; i < n; i++) { for(int j = h_R[i]; j < h_R[i+1]; j++) { h_F[j] = i; } } int *h_C = (int*) malloc(2*m*sizeof(int)); for(int i = 0; i < 2*m; i++) { cin>>h_C[i]; } int *d_F, *d_C, *d_d, *d_depth; cudaMalloc((void**) &d_F, 2*m*sizeof(int)); cudaMalloc((void**) &d_C, 2*m*sizeof(int)); cudaMalloc((void**) &d_d, n*sizeof(int)); cudaMalloc((void**) &d_depth, sizeof(int)); cudaMemcpy(d_F, h_F, 2*m*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_C, h_C, 2*m*sizeof(int), cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); edge_parallel_bfs<<<1, BLOCK_SIZE>>>(d_d, d_F, d_C, n, m, d_depth); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout<<"Compute time in GPU: "<<milliseconds<<"ms"<<endl; int *h_d = (int*) malloc(n*sizeof(int)); int *h_depth = (int*) malloc(sizeof(int)); 
cudaMemcpy(h_d, d_d, n*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(h_depth, d_depth, sizeof(int), cudaMemcpyDeviceToHost); int *h_check_d = (int*)malloc(n*sizeof(int)); freopen(argv[2], "r", stdin); for(int i = 0; i < n; i++) { cin>>h_check_d[i]; } bool flag = true; int count = 0; const int errcount = 20; for(int i = 0; i < n; i++) { if(h_d[i] != h_check_d[i]) { flag = false; if(count < errcount) { cout<<i<<" : "<<h_d[i]<<" "<<h_check_d[i]<<endl; } count++; } } if(flag) { cout<<"Solution is correct!"<<endl; cout<<"The depth of the given graph from node 0 is "<<(*h_depth)<<endl; } else { cout<<"Solution is incorrect!"<<endl; cout<<count<<" testcases failed."<<endl; } return 0; }
7d9a79ab96378b5ff9d436ad87948566437e98bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2014. All rights reserved. * * CUDA Kernel Device code * Rowan Hughes */ #define SDATA(index) sdata[index] #define SMEM(X, Y) sdata[(Y)*bw+(X)] extern "C" __global__ void copyReductionKernel(float4* g_idata, float* g_odata, int chanels, int sizeData, int offset) { // shared memory // the size is determined by the host application extern __shared__ float sdata[]; const int dId = (blockDim.x * blockIdx.x + threadIdx.x); if(dId >= sizeData) return; const unsigned int tIdC = chanels * threadIdx.x; const int pixelIdIn = offset + dId; const int tIdMax = sizeData - blockDim.x*blockIdx.x; // load data from global to shared memory float4 ldata = g_idata[pixelIdIn]; //[thetaDot1, thetaDot2, ttc, first] if(ldata.z >= 0 && ldata.x != 0 ) { // it is a pixel belonging to an object SDATA(tIdC ) = ldata.x; // thetaDot1 SDATA(tIdC+1) = ldata.y; // thetaDot2 SDATA(tIdC+2) = ldata.z; // ttc SDATA(tIdC+3) = 1; // go first if taking thetaDot1 SDATA(tIdC+4) = 1; // go first if taking thetaDot2 if(ldata.z < 3 && ldata.w <= 0) // ttc < 3s and giving a way { if(abs(ldata.x) < abs(ldata.y)) { SDATA(tIdC+3) = -1; // go second if taking thetaDot1 => slow down } else { SDATA(tIdC+4) = -1; // go second if taking thetaDot2 => slow down } } } else { // it is a background pixel SDATA(tIdC+2) = -1; } __syncthreads(); // perform reduction for (unsigned int i=blockDim.x*0.5; i>0; i>>=1) { if(threadIdx.x < i && (threadIdx.x + i < tIdMax)) { int ic = chanels*i+tIdC; if(SDATA(ic+2) >= 0) // if ttc2 >= 0 { if(SDATA(tIdC+2) >= 0) // if ttc1 >= 0 { SDATA(tIdC ) = min(SDATA(tIdC ), SDATA(ic )); SDATA(tIdC+1) = max(SDATA(tIdC+1), SDATA(ic+1)); SDATA(tIdC+2) = min(SDATA(tIdC+2), SDATA(ic+2)); SDATA(tIdC+3) = min(SDATA(tIdC+3), SDATA(ic+3)); SDATA(tIdC+4) = min(SDATA(tIdC+4), SDATA(ic+4)); } else { SDATA(tIdC ) = SDATA(ic); SDATA(tIdC+1) = SDATA(ic+1); SDATA(tIdC+2) = SDATA(ic+2); SDATA(tIdC+3) = SDATA(ic+3); 
SDATA(tIdC+4) = SDATA(ic+4); } } } __syncthreads(); } // write data to global memory if(threadIdx.x==0) { int bc = chanels*blockIdx.x; g_odata[bc] = SDATA(0); g_odata[bc+1] = SDATA(1); g_odata[bc+2] = SDATA(2); g_odata[bc+3] = SDATA(3); g_odata[bc+4] = SDATA(4); } }
7d9a79ab96378b5ff9d436ad87948566437e98bb.cu
/* * Copyright 2014. All rights reserved. * * CUDA Kernel Device code * Rowan Hughes */ #define SDATA(index) sdata[index] #define SMEM(X, Y) sdata[(Y)*bw+(X)] extern "C" __global__ void copyReductionKernel(float4* g_idata, float* g_odata, int chanels, int sizeData, int offset) { // shared memory // the size is determined by the host application extern __shared__ float sdata[]; const int dId = (blockDim.x * blockIdx.x + threadIdx.x); if(dId >= sizeData) return; const unsigned int tIdC = chanels * threadIdx.x; const int pixelIdIn = offset + dId; const int tIdMax = sizeData - blockDim.x*blockIdx.x; // load data from global to shared memory float4 ldata = g_idata[pixelIdIn]; //[thetaDot1, thetaDot2, ttc, first] if(ldata.z >= 0 && ldata.x != 0 ) { // it is a pixel belonging to an object SDATA(tIdC ) = ldata.x; // thetaDot1 SDATA(tIdC+1) = ldata.y; // thetaDot2 SDATA(tIdC+2) = ldata.z; // ttc SDATA(tIdC+3) = 1; // go first if taking thetaDot1 SDATA(tIdC+4) = 1; // go first if taking thetaDot2 if(ldata.z < 3 && ldata.w <= 0) // ttc < 3s and giving a way { if(abs(ldata.x) < abs(ldata.y)) { SDATA(tIdC+3) = -1; // go second if taking thetaDot1 => slow down } else { SDATA(tIdC+4) = -1; // go second if taking thetaDot2 => slow down } } } else { // it is a background pixel SDATA(tIdC+2) = -1; } __syncthreads(); // perform reduction for (unsigned int i=blockDim.x*0.5; i>0; i>>=1) { if(threadIdx.x < i && (threadIdx.x + i < tIdMax)) { int ic = chanels*i+tIdC; if(SDATA(ic+2) >= 0) // if ttc2 >= 0 { if(SDATA(tIdC+2) >= 0) // if ttc1 >= 0 { SDATA(tIdC ) = min(SDATA(tIdC ), SDATA(ic )); SDATA(tIdC+1) = max(SDATA(tIdC+1), SDATA(ic+1)); SDATA(tIdC+2) = min(SDATA(tIdC+2), SDATA(ic+2)); SDATA(tIdC+3) = min(SDATA(tIdC+3), SDATA(ic+3)); SDATA(tIdC+4) = min(SDATA(tIdC+4), SDATA(ic+4)); } else { SDATA(tIdC ) = SDATA(ic); SDATA(tIdC+1) = SDATA(ic+1); SDATA(tIdC+2) = SDATA(ic+2); SDATA(tIdC+3) = SDATA(ic+3); SDATA(tIdC+4) = SDATA(ic+4); } } } __syncthreads(); } // write data to global memory 
if(threadIdx.x==0) { int bc = chanels*blockIdx.x; g_odata[bc] = SDATA(0); g_odata[bc+1] = SDATA(1); g_odata[bc+2] = SDATA(2); g_odata[bc+3] = SDATA(3); g_odata[bc+4] = SDATA(4); } }
dd74adfbbb776d6b32ffed606027188e0a98022b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <ops/declarable/helpers/roll.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { template <typename T> static void _CUDA_D rollKernelLinearStage1Dev(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift) { auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); auto xEws = shape::elementWiseStride(xShapeInfo); auto zEws = shape::elementWiseStride(zShapeInfo); auto xOrder = shape::order(xShapeInfo); auto zOrder = shape::order(zShapeInfo); auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int sourceIndex = fullLength - actualShift + i; auto eA = x[sourceIndex * xEws]; auto eB = x[i * xEws]; z[i * zEws] = eA; z[sourceIndex * zEws] = eB; } } else { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int sourceIndex = fullLength - actualShift + i; 
auto xOffsetA = shape::getIndexOffset(i, xShapeInfo); auto xOffsetB = shape::getIndexOffset(sourceIndex, xShapeInfo); auto zOffsetA = shape::getIndexOffset(i, zShapeInfo); auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo); auto eA = x[xOffsetA]; auto eB = x[xOffsetB]; z[zOffsetA] = eB; z[zOffsetB] = eA; } } } template <typename T> static void _CUDA_G rollKernelLinearStage1(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift) { rollKernelLinearStage1Dev<T>(vx, xShapeInfo, vz, zShapeInfo, fullLength, actualShift); } template <typename T> static void _CUDA_G rollKernelLinearStage2(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift, int shiftCount) { auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); auto xEws = shape::elementWiseStride(xShapeInfo); auto zEws = shape::elementWiseStride(zShapeInfo); auto xOrder = shape::order(xShapeInfo); auto zOrder = shape::order(zShapeInfo); auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int count = 1; count < shiftCount; ++count) { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int destinationIndex = fullLength - (count + 1) * actualShift + i; int sourceIndex = fullLength - count * actualShift + i; auto eA = x[sourceIndex * xEws]; auto eB = x[destinationIndex * xEws]; z[destinationIndex * zEws] = eA; z[sourceIndex * zEws] = eB; } __syncthreads(); } } else { for (int count = 1; count < shiftCount; ++count) { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int destinationIndex = fullLength - (count + 1) * actualShift + i; int sourceIndex = fullLength - count * actualShift + i; auto xOffsetA = shape::getIndexOffset(destinationIndex, xShapeInfo); auto xOffsetB = shape::getIndexOffset(sourceIndex, xShapeInfo); auto zOffsetA = shape::getIndexOffset(destinationIndex, 
zShapeInfo); auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo); auto eA = x[xOffsetA]; auto eB = x[xOffsetB]; z[zOffsetA] = eB; z[zOffsetB] = eA; } __syncthreads(); } } } template <typename T> static void _CUDA_G rollKernelLinearStage3(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift, int remainShift) { auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); auto xEws = shape::elementWiseStride(xShapeInfo); auto zEws = shape::elementWiseStride(zShapeInfo); auto xOrder = shape::order(xShapeInfo); auto zOrder = shape::order(zShapeInfo); auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid ; i < actualShift; i += blockDim.x * gridDim.x) { int remainIdx = i + actualShift; int sourceIndex = remainIdx + remainShift; auto eA = x[sourceIndex * xEws]; auto eB = x[remainIdx * xEws]; z[remainIdx * zEws] = eA; z[sourceIndex * zEws] = eB; } } else { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int remainIdx = i + actualShift; int sourceIndex = remainIdx + remainShift; auto xOffsetA = shape::getIndexOffset(remainIdx, xShapeInfo); auto xOffsetB = shape::getIndexOffset(sourceIndex, xShapeInfo); auto zOffsetA = shape::getIndexOffset(remainIdx, zShapeInfo); auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo); auto eA = x[xOffsetA]; auto eB = x[xOffsetB]; z[zOffsetA] = eB; z[zOffsetB] = eA; } } } template <typename T> static void _CUDA_D swapTadsKernel(void *vx, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong tadLength) { auto x = reinterpret_cast<T*>(vx); auto z = reinterpret_cast<T*>(vz); auto zEws = shape::elementWiseStride(zShapeInfo); auto zOrder = shape::order(zShapeInfo); auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (zEws > 0) { for (int e = threadIdx.x; e < tadLength; e += blockDim.x) { auto eA = x[e * zEws]; auto eB = z[e * zEws]; x[e * zEws] = eB; z[e * zEws] = eA; } } else 
{ for (int e = threadIdx.x; e < tadLength; e += blockDim.x) { auto zOffset = shape::getIndexOffset(e, zShapeInfo); auto eA = x[zOffset]; auto eB = z[zOffset]; x[zOffset] = eB; z[zOffset] = eA; } } } template <typename T> static void _CUDA_G rollKernelFullAnyDimensionStage1(const void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets, int numTads, Nd4jLong tadLength, int dim, Nd4jLong sizeAt, int theShift) { auto x = reinterpret_cast<const T *>(vx); auto z = reinterpret_cast<T *>(vz); for (int e = blockIdx.x + theShift; e < sizeAt - theShift; e += gridDim.x) { int sourceIndex = dim * sizeAt + e - theShift; int targetIndex = dim * sizeAt + e; swapTadsKernel<T>(z + xTadOffsets[sourceIndex], z + xTadOffsets[targetIndex], zTadShapeInfo, tadLength); } } template <typename T> static void _CUDA_G rollKernelFullAnyDimensionStage2(void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets, int numTads, Nd4jLong tadLength, int dim, Nd4jLong sizeAt, int theShift) { auto x = reinterpret_cast<const T *>(vx); auto z = reinterpret_cast<T *>(vz); for (int e = blockIdx.x; e < theShift; e += gridDim.x) { int sourceIndex = dim * sizeAt + sizeAt - theShift + e; int targetIndex = dim * sizeAt + e; swapTadsKernel<T>(z + zTadOffsets[sourceIndex], z + zTadOffsets[targetIndex], zTadShapeInfo, tadLength); } } template <typename T> static void rollFunctorFull_(NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace){ if (!inplace) output->assign(input); for (size_t i = 0; i < axes.size(); i++) { int axe = axes[i]; if (axe == input->rankOf() - 1) { // last dimension ResultSet listOfTensors = output->allTensorsAlongDimension({axe}); ResultSet listOfOutTensors = output->allTensorsAlongDimension({axe}); int fullLen = listOfTensors.size(); int theShift = shifts[i]; // if (theShift > 0) 
{ // theShift %= fullLen; // } // else { // theShift -= fullLen * (theShift / fullLen - 1); // } for (int k = 0; k < fullLen; k++) { rollFunctorLinear(output->getContext(), listOfTensors.at(k), listOfOutTensors.at(k), theShift, true); } } else { std::vector<int> dims(input->rankOf() - axe - 1); for (int i = 0; i < dims.size(); ++i) dims[i] = axe + 1 + i; auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dims); int numTads = packZ.numberOfTads(); int sizeAt = input->sizeAt(axe); auto tadLength = shape::length(packZ.primaryShapeInfo()); int theShift = shifts[i]; // if (theShift > 0) // theShift %= sizeAt; // else // theShift -= sizeAt * (theShift / sizeAt - 1); if (theShift) { for (int dim = 0; dim < numTads / sizeAt; ++dim) { hipLaunchKernelGGL(( rollKernelFullAnyDimensionStage1<T>), dim3(1), dim3(256), 1024, *(output->getContext()->getCudaStream()), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), numTads, tadLength, dim, sizeAt, theShift); hipLaunchKernelGGL(( rollKernelFullAnyDimensionStage2<T>), dim3(1), dim3(256), 1024, *(output->getContext()->getCudaStream()), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), numTads, tadLength, dim, sizeAt, theShift); } } } } } template <typename T> static void rollFunctorLinear_(NDArray* input, NDArray* output, int shift, bool inplace){ if (!inplace) output->assign(input); auto fullLen = input->lengthOf(); int actualShift = shift; // % fullLen; // shift already non-negative then if (actualShift < 0) { actualShift -= fullLen * (actualShift / fullLen - 1); } else actualShift %= fullLen; if (actualShift) { int shiftCount = fullLen / actualShift - 1; int remainShift = fullLen % actualShift; // stage 1) swap last actualShift elements with first ones. 
hipLaunchKernelGGL(( rollKernelLinearStage1<T>), dim3(1), dim3(1), 1024, *(output->getContext()->getCudaStream()), output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift); // stage 2) swap swapped actualShift elements with rest remainShiftCount times. hipLaunchKernelGGL(( rollKernelLinearStage2<T>), dim3(1), dim3(1), 1024, *(output->getContext()->getCudaStream()), output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift, shiftCount); // FIXME: no parallelism here :( // stage 3) swap remainer of items. if (remainShift && shiftCount) hipLaunchKernelGGL(( rollKernelLinearStage3<T>), dim3(1), dim3(1), 1024, *(output->getContext()->getCudaStream()), output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift, remainShift); } } void rollFunctorFull(sd::LaunchContext * context, NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace){ input->syncToDevice(); BUILD_SINGLE_SELECTOR(input->dataType(), rollFunctorFull_, (input, output, shifts, axes, inplace), LIBND4J_TYPES); output->tickWriteDevice(); } void rollFunctorLinear(sd::LaunchContext * context, NDArray* input, NDArray* output, int shift, bool inplace){ input->syncToDevice(); BUILD_SINGLE_SELECTOR(input->dataType(), rollFunctorLinear_, (input, output, shift, inplace), LIBND4J_TYPES); output->tickWriteDevice(); } BUILD_SINGLE_TEMPLATE(template void rollFunctorLinear_, (NDArray* input, NDArray* output, int shift, bool inplace), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template void rollFunctorFull_, (NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace), LIBND4J_TYPES); } } }
dd74adfbbb776d6b32ffed606027188e0a98022b.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <ops/declarable/helpers/roll.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { template <typename T> static void _CUDA_D rollKernelLinearStage1Dev(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift) { auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); auto xEws = shape::elementWiseStride(xShapeInfo); auto zEws = shape::elementWiseStride(zShapeInfo); auto xOrder = shape::order(xShapeInfo); auto zOrder = shape::order(zShapeInfo); auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int sourceIndex = fullLength - actualShift + i; auto eA = x[sourceIndex * xEws]; auto eB = x[i * xEws]; z[i * zEws] = eA; z[sourceIndex * zEws] = eB; } } else { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int sourceIndex = fullLength - actualShift + i; auto xOffsetA = shape::getIndexOffset(i, xShapeInfo); auto xOffsetB = 
shape::getIndexOffset(sourceIndex, xShapeInfo); auto zOffsetA = shape::getIndexOffset(i, zShapeInfo); auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo); auto eA = x[xOffsetA]; auto eB = x[xOffsetB]; z[zOffsetA] = eB; z[zOffsetB] = eA; } } } template <typename T> static void _CUDA_G rollKernelLinearStage1(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift) { rollKernelLinearStage1Dev<T>(vx, xShapeInfo, vz, zShapeInfo, fullLength, actualShift); } template <typename T> static void _CUDA_G rollKernelLinearStage2(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift, int shiftCount) { auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); auto xEws = shape::elementWiseStride(xShapeInfo); auto zEws = shape::elementWiseStride(zShapeInfo); auto xOrder = shape::order(xShapeInfo); auto zOrder = shape::order(zShapeInfo); auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int count = 1; count < shiftCount; ++count) { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int destinationIndex = fullLength - (count + 1) * actualShift + i; int sourceIndex = fullLength - count * actualShift + i; auto eA = x[sourceIndex * xEws]; auto eB = x[destinationIndex * xEws]; z[destinationIndex * zEws] = eA; z[sourceIndex * zEws] = eB; } __syncthreads(); } } else { for (int count = 1; count < shiftCount; ++count) { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int destinationIndex = fullLength - (count + 1) * actualShift + i; int sourceIndex = fullLength - count * actualShift + i; auto xOffsetA = shape::getIndexOffset(destinationIndex, xShapeInfo); auto xOffsetB = shape::getIndexOffset(sourceIndex, xShapeInfo); auto zOffsetA = shape::getIndexOffset(destinationIndex, zShapeInfo); auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo); auto 
eA = x[xOffsetA]; auto eB = x[xOffsetB]; z[zOffsetA] = eB; z[zOffsetB] = eA; } __syncthreads(); } } } template <typename T> static void _CUDA_G rollKernelLinearStage3(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong fullLength, int actualShift, int remainShift) { auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); auto xEws = shape::elementWiseStride(xShapeInfo); auto zEws = shape::elementWiseStride(zShapeInfo); auto xOrder = shape::order(xShapeInfo); auto zOrder = shape::order(zShapeInfo); auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (xEws > 0 && zEws > 0 && xOrder == zOrder) { for (int i = tid ; i < actualShift; i += blockDim.x * gridDim.x) { int remainIdx = i + actualShift; int sourceIndex = remainIdx + remainShift; auto eA = x[sourceIndex * xEws]; auto eB = x[remainIdx * xEws]; z[remainIdx * zEws] = eA; z[sourceIndex * zEws] = eB; } } else { for (int i = tid; i < actualShift; i += blockDim.x * gridDim.x) { int remainIdx = i + actualShift; int sourceIndex = remainIdx + remainShift; auto xOffsetA = shape::getIndexOffset(remainIdx, xShapeInfo); auto xOffsetB = shape::getIndexOffset(sourceIndex, xShapeInfo); auto zOffsetA = shape::getIndexOffset(remainIdx, zShapeInfo); auto zOffsetB = shape::getIndexOffset(sourceIndex, zShapeInfo); auto eA = x[xOffsetA]; auto eB = x[xOffsetB]; z[zOffsetA] = eB; z[zOffsetB] = eA; } } } template <typename T> static void _CUDA_D swapTadsKernel(void *vx, void *vz, const Nd4jLong *zShapeInfo, Nd4jLong tadLength) { auto x = reinterpret_cast<T*>(vx); auto z = reinterpret_cast<T*>(vz); auto zEws = shape::elementWiseStride(zShapeInfo); auto zOrder = shape::order(zShapeInfo); auto tid = threadIdx.x + blockIdx.x * blockDim.x; if (zEws > 0) { for (int e = threadIdx.x; e < tadLength; e += blockDim.x) { auto eA = x[e * zEws]; auto eB = z[e * zEws]; x[e * zEws] = eB; z[e * zEws] = eA; } } else { for (int e = threadIdx.x; e < tadLength; e += blockDim.x) { auto zOffset = 
shape::getIndexOffset(e, zShapeInfo); auto eA = x[zOffset]; auto eB = z[zOffset]; x[zOffset] = eB; z[zOffset] = eA; } } } template <typename T> static void _CUDA_G rollKernelFullAnyDimensionStage1(const void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets, int numTads, Nd4jLong tadLength, int dim, Nd4jLong sizeAt, int theShift) { auto x = reinterpret_cast<const T *>(vx); auto z = reinterpret_cast<T *>(vz); for (int e = blockIdx.x + theShift; e < sizeAt - theShift; e += gridDim.x) { int sourceIndex = dim * sizeAt + e - theShift; int targetIndex = dim * sizeAt + e; swapTadsKernel<T>(z + xTadOffsets[sourceIndex], z + xTadOffsets[targetIndex], zTadShapeInfo, tadLength); } } template <typename T> static void _CUDA_G rollKernelFullAnyDimensionStage2(void *vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xTadOffsets, void *vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zTadOffsets, int numTads, Nd4jLong tadLength, int dim, Nd4jLong sizeAt, int theShift) { auto x = reinterpret_cast<const T *>(vx); auto z = reinterpret_cast<T *>(vz); for (int e = blockIdx.x; e < theShift; e += gridDim.x) { int sourceIndex = dim * sizeAt + sizeAt - theShift + e; int targetIndex = dim * sizeAt + e; swapTadsKernel<T>(z + zTadOffsets[sourceIndex], z + zTadOffsets[targetIndex], zTadShapeInfo, tadLength); } } template <typename T> static void rollFunctorFull_(NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace){ if (!inplace) output->assign(input); for (size_t i = 0; i < axes.size(); i++) { int axe = axes[i]; if (axe == input->rankOf() - 1) { // last dimension ResultSet listOfTensors = output->allTensorsAlongDimension({axe}); ResultSet listOfOutTensors = output->allTensorsAlongDimension({axe}); int fullLen = listOfTensors.size(); int theShift = shifts[i]; // if (theShift > 0) { // theShift %= fullLen; // } // else { // theShift -= fullLen * (theShift 
/ fullLen - 1); // } for (int k = 0; k < fullLen; k++) { rollFunctorLinear(output->getContext(), listOfTensors.at(k), listOfOutTensors.at(k), theShift, true); } } else { std::vector<int> dims(input->rankOf() - axe - 1); for (int i = 0; i < dims.size(); ++i) dims[i] = axe + 1 + i; auto packZ = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dims); int numTads = packZ.numberOfTads(); int sizeAt = input->sizeAt(axe); auto tadLength = shape::length(packZ.primaryShapeInfo()); int theShift = shifts[i]; // if (theShift > 0) // theShift %= sizeAt; // else // theShift -= sizeAt * (theShift / sizeAt - 1); if (theShift) { for (int dim = 0; dim < numTads / sizeAt; ++dim) { rollKernelFullAnyDimensionStage1<T><<<1, 256, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), numTads, tadLength, dim, sizeAt, theShift); rollKernelFullAnyDimensionStage2<T><<<1, 256, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), numTads, tadLength, dim, sizeAt, theShift); } } } } } template <typename T> static void rollFunctorLinear_(NDArray* input, NDArray* output, int shift, bool inplace){ if (!inplace) output->assign(input); auto fullLen = input->lengthOf(); int actualShift = shift; // % fullLen; // shift already non-negative then if (actualShift < 0) { actualShift -= fullLen * (actualShift / fullLen - 1); } else actualShift %= fullLen; if (actualShift) { int shiftCount = fullLen / actualShift - 1; int remainShift = fullLen % actualShift; // stage 1) swap last actualShift elements with first ones. 
rollKernelLinearStage1<T><<<1, 1, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift); // stage 2) swap swapped actualShift elements with rest remainShiftCount times. rollKernelLinearStage2<T><<<1, 1, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift, shiftCount); // FIXME: no parallelism here :( // stage 3) swap remainer of items. if (remainShift && shiftCount) rollKernelLinearStage3<T><<<1, 1, 1024, *(output->getContext()->getCudaStream())>>>(output->specialBuffer(), output->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), fullLen, actualShift, remainShift); } } void rollFunctorFull(sd::LaunchContext * context, NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace){ input->syncToDevice(); BUILD_SINGLE_SELECTOR(input->dataType(), rollFunctorFull_, (input, output, shifts, axes, inplace), LIBND4J_TYPES); output->tickWriteDevice(); } void rollFunctorLinear(sd::LaunchContext * context, NDArray* input, NDArray* output, int shift, bool inplace){ input->syncToDevice(); BUILD_SINGLE_SELECTOR(input->dataType(), rollFunctorLinear_, (input, output, shift, inplace), LIBND4J_TYPES); output->tickWriteDevice(); } BUILD_SINGLE_TEMPLATE(template void rollFunctorLinear_, (NDArray* input, NDArray* output, int shift, bool inplace), LIBND4J_TYPES); BUILD_SINGLE_TEMPLATE(template void rollFunctorFull_, (NDArray* input, NDArray* output, std::vector<int> const& shifts, std::vector<int> const& axes, bool inplace), LIBND4J_TYPES); } } }
558229f64bf8cd9efda1f5e2c617493f215cba33.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include "HOGEngine.h" #include "HOGUtils.h" // Short function to synchronize with our global user-defined stream. static void sync(void) { checkCudaErrors(hipStreamSynchronize(stream)); } //Round a / b to nearest higher integer value int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Round a / b to nearest lower integer value int iDivDown(int a, int b) { return a / b; } //Align a to nearest higher multiple of b int iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b int iAlignDown(int a, int b) { return a - a % b; } //Round a / b to nearest higher integer value int iDivUpF(int a, float b) { return (a % int(b) != 0) ? int(a / b + 1) : int(a / b); } int iClosestPowerOfTwo(int x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x++; return x; } void Uchar4ToFloat4(uchar4 *inputImage, float4 *outputImage, int width, int height) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); hipLaunchKernelGGL(( uchar4tofloat4), dim3(blocks), dim3(threads_in_block), 0, stream, inputImage, outputImage, width, height); sync(); } void Float4ToUchar4(float4 *inputImage, uchar4 *outputImage, int width, int height) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); hipLaunchKernelGGL(( float4toUchar4), dim3(blocks), dim3(threads_in_block), 0, stream, inputImage, outputImage, width, height); sync(); } void Float2ToUchar4(float2 *inputImage, uchar4 *outputImage, int width, int height, int index) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); hipLaunchKernelGGL(( float2toUchar4), dim3(blocks), dim3(threads_in_block), 0, stream, inputImage, outputImage, width, 
height, index); sync(); } void Float2ToUchar1(float2 *inputImage, uchar1 *outputImage, int width, int height, int index) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); hipLaunchKernelGGL(( float2toUchar1), dim3(blocks), dim3(threads_in_block), 0, stream, inputImage, outputImage, width, height, index); sync(); } void Float1ToUchar4(float1 *inputImage, uchar4 *outputImage, int width, int height) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); hipLaunchKernelGGL(( float1toUchar4), dim3(blocks), dim3(threads_in_block), 0, stream, inputImage, outputImage, width, height); sync(); } void Float1ToUchar1(float1 *inputImage, uchar1 *outputImage, int width, int height) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); hipLaunchKernelGGL(( float1toUchar1), dim3(blocks), dim3(threads_in_block), 0, stream, inputImage, outputImage, width, height); sync(); } __global__ void float4toUchar4(float4 *inputImage, uchar4 *outputImage, int width, int height) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float4 pixelf = inputImage[offset]; uchar4 pixel; pixel.x = (unsigned char) pixelf.x; pixel.y = (unsigned char) pixelf.y; pixel.z = (unsigned char) pixelf.z; pixel.w = (unsigned char) pixelf.w; outputImage[offset] = pixel; } __global__ void float2toUchar4(float2 *inputImage, uchar4 *outputImage, int width, int height, int index) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float2 pixelf = inputImage[offset]; float pixelfIndexed = (index == 0) ? 
pixelf.x : pixelf.y; uchar4 pixel; pixel.x = (unsigned char) abs(pixelfIndexed); pixel.y = (unsigned char) abs(pixelfIndexed); pixel.z = (unsigned char) abs(pixelfIndexed); pixel.w = (unsigned char) abs(pixelfIndexed); outputImage[offset] = pixel; } __global__ void float2toUchar1(float2 *inputImage, uchar1 *outputImage, int width, int height, int index) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float2 pixelf = inputImage[offset]; float pixelfIndexed = (index == 0) ? pixelf.x : pixelf.y; uchar1 pixel; pixel.x = (unsigned char) pixelfIndexed; outputImage[offset] = pixel; } __global__ void float1toUchar4(float1 *inputImage, uchar4 *outputImage, int width, int height) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float1 pixelf = inputImage[offset]; uchar4 pixel; pixel.x = (unsigned char) pixelf.x; pixel.y = (unsigned char) pixelf.x; pixel.z = (unsigned char) pixelf.x; pixel.w = (unsigned char) pixelf.x; outputImage[offset] = pixel; } __global__ void float1toUchar1(float1 *inputImage, uchar1 *outputImage, int width, int height) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float1 pixelf = inputImage[offset]; uchar1 pixel; pixel.x = (unsigned char) pixelf.x; outputImage[offset] = pixel; } __global__ void uchar4tofloat4(uchar4 *inputImage, float4 *outputImage, int width, int height) { int offsetX = blockIdx.x * blockDim.x + threadIdx.x; int offsetY = blockIdx.y * blockDim.y + threadIdx.y; if (offsetX < width && offsetY < height) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; uchar4 pixel = inputImage[offset]; float4 pixelf; pixelf.x = pixel.x; pixelf.y = pixel.y; pixelf.z = pixel.z; pixelf.w = 
pixel.w; outputImage[offset] = pixelf; } }
558229f64bf8cd9efda1f5e2c617493f215cba33.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include "HOGEngine.h" #include "HOGUtils.h" // Short function to synchronize with our global user-defined stream. static void sync(void) { checkCudaErrors(cudaStreamSynchronize(stream)); } //Round a / b to nearest higher integer value int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Round a / b to nearest lower integer value int iDivDown(int a, int b) { return a / b; } //Align a to nearest higher multiple of b int iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b int iAlignDown(int a, int b) { return a - a % b; } //Round a / b to nearest higher integer value int iDivUpF(int a, float b) { return (a % int(b) != 0) ? int(a / b + 1) : int(a / b); } int iClosestPowerOfTwo(int x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x++; return x; } void Uchar4ToFloat4(uchar4 *inputImage, float4 *outputImage, int width, int height) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); uchar4tofloat4<<<blocks, threads_in_block, 0, stream>>>(inputImage, outputImage, width, height); sync(); } void Float4ToUchar4(float4 *inputImage, uchar4 *outputImage, int width, int height) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); float4toUchar4<<<blocks, threads_in_block, 0, stream>>>(inputImage, outputImage, width, height); sync(); } void Float2ToUchar4(float2 *inputImage, uchar4 *outputImage, int width, int height, int index) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); float2toUchar4<<<blocks, threads_in_block, 0, stream>>>(inputImage, outputImage, width, height, index); sync(); } void Float2ToUchar1(float2 *inputImage, uchar1 *outputImage, int width, int height, int index) { dim3 threads_in_block(16,16); dim3 
blocks(iDivUp(width,16), iDivUp(height,16)); float2toUchar1<<<blocks, threads_in_block, 0, stream>>>(inputImage, outputImage, width, height, index); sync(); } void Float1ToUchar4(float1 *inputImage, uchar4 *outputImage, int width, int height) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); float1toUchar4<<<blocks, threads_in_block, 0, stream>>>(inputImage, outputImage, width, height); sync(); } void Float1ToUchar1(float1 *inputImage, uchar1 *outputImage, int width, int height) { dim3 threads_in_block(16,16); dim3 blocks(iDivUp(width,16), iDivUp(height,16)); float1toUchar1<<<blocks, threads_in_block, 0, stream>>>(inputImage, outputImage, width, height); sync(); } __global__ void float4toUchar4(float4 *inputImage, uchar4 *outputImage, int width, int height) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float4 pixelf = inputImage[offset]; uchar4 pixel; pixel.x = (unsigned char) pixelf.x; pixel.y = (unsigned char) pixelf.y; pixel.z = (unsigned char) pixelf.z; pixel.w = (unsigned char) pixelf.w; outputImage[offset] = pixel; } __global__ void float2toUchar4(float2 *inputImage, uchar4 *outputImage, int width, int height, int index) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float2 pixelf = inputImage[offset]; float pixelfIndexed = (index == 0) ? 
pixelf.x : pixelf.y; uchar4 pixel; pixel.x = (unsigned char) abs(pixelfIndexed); pixel.y = (unsigned char) abs(pixelfIndexed); pixel.z = (unsigned char) abs(pixelfIndexed); pixel.w = (unsigned char) abs(pixelfIndexed); outputImage[offset] = pixel; } __global__ void float2toUchar1(float2 *inputImage, uchar1 *outputImage, int width, int height, int index) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float2 pixelf = inputImage[offset]; float pixelfIndexed = (index == 0) ? pixelf.x : pixelf.y; uchar1 pixel; pixel.x = (unsigned char) pixelfIndexed; outputImage[offset] = pixel; } __global__ void float1toUchar4(float1 *inputImage, uchar4 *outputImage, int width, int height) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float1 pixelf = inputImage[offset]; uchar4 pixel; pixel.x = (unsigned char) pixelf.x; pixel.y = (unsigned char) pixelf.x; pixel.z = (unsigned char) pixelf.x; pixel.w = (unsigned char) pixelf.x; outputImage[offset] = pixel; } __global__ void float1toUchar1(float1 *inputImage, uchar1 *outputImage, int width, int height) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float1 pixelf = inputImage[offset]; uchar1 pixel; pixel.x = (unsigned char) pixelf.x; outputImage[offset] = pixel; } __global__ void uchar4tofloat4(uchar4 *inputImage, float4 *outputImage, int width, int height) { int offsetX = blockIdx.x * blockDim.x + threadIdx.x; int offsetY = blockIdx.y * blockDim.y + threadIdx.y; if (offsetX < width && offsetY < height) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; uchar4 pixel = inputImage[offset]; float4 pixelf; pixelf.x = pixel.x; pixelf.y = pixel.y; pixelf.z = pixel.z; pixelf.w = 
pixel.w; outputImage[offset] = pixelf; } }
09620c55d0373b3ff6d7b1df40fe3930670060bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "imageprocessing.cuh" #include <stdio.h> #include <iostream> #include <string.h> #include <math.h> #include <sstream> // TODO: read about the CUDA programming model: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#programming-model // If everything is setup correctly, this file is compiled by the CUDA/C++ compiler (that is different from the C++ compiler). // The CUDA/C++ compiler understands certain things that your C++ compiler doesn't understand - like '__global__', 'threadIdx', and function calls with triple-angle brackets, e.g., testArray<<<...>>>(); // do not use this method for anything else than verifying cuda compiled, linked and executed __global__ void testArray(float* dst, float value) { unsigned int index = threadIdx.x; dst[index] = value; } void testCudaCall() { // quick and dirty test of CUDA setup const unsigned int N = 1024; float* device_array; hipMalloc(&device_array, N * sizeof(float)); testArray << <1, N >> > (device_array, -0.5f); float x[N]; hipMemcpy(x, device_array, N * sizeof(float), hipMemcpyDeviceToHost); std::cout << "quick and dirty test of CUDA setup: " << x[0] << " " << x[1] << " " << x[1023] << std::endl; hipFree(device_array); } // TODO: implement the image processing operations using CUDA kernels
09620c55d0373b3ff6d7b1df40fe3930670060bf.cu
#include "imageprocessing.cuh" #include <stdio.h> #include <iostream> #include <string.h> #include <math.h> #include <sstream> // TODO: read about the CUDA programming model: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#programming-model // If everything is setup correctly, this file is compiled by the CUDA/C++ compiler (that is different from the C++ compiler). // The CUDA/C++ compiler understands certain things that your C++ compiler doesn't understand - like '__global__', 'threadIdx', and function calls with triple-angle brackets, e.g., testArray<<<...>>>(); // do not use this method for anything else than verifying cuda compiled, linked and executed __global__ void testArray(float* dst, float value) { unsigned int index = threadIdx.x; dst[index] = value; } void testCudaCall() { // quick and dirty test of CUDA setup const unsigned int N = 1024; float* device_array; cudaMalloc(&device_array, N * sizeof(float)); testArray << <1, N >> > (device_array, -0.5f); float x[N]; cudaMemcpy(x, device_array, N * sizeof(float), cudaMemcpyDeviceToHost); std::cout << "quick and dirty test of CUDA setup: " << x[0] << " " << x[1] << " " << x[1023] << std::endl; cudaFree(device_array); } // TODO: implement the image processing operations using CUDA kernels
15830ed5dae0ab265e8ccc96fdf8bc826447ca1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void uplo_lgamma (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { b[offset_b + gid_0 + gid_1 * ld_b] = CAST(lgamma)(a[offset_a + gid_0 + gid_1 * ld_a]); } }
15830ed5dae0ab265e8ccc96fdf8bc826447ca1e.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void uplo_lgamma (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { b[offset_b + gid_0 + gid_1 * ld_b] = CAST(lgamma)(a[offset_a + gid_0 + gid_1 * ld_a]); } }
ca9e73786425605821a090da8a75bb272b520c46.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include "rocblas.h" #include "hipsparse.h" #include <cmath> #include <vector> #include <fstream> #include <iomanip> // #define M 6 // #define N 5 // #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) /* std::list<int> recommend(int userid, std::vector<std::vector<int> > user_items, std::vector<std::vector<int> > user_factors, std::vector<std::vector<int> > item_factors, int N) { int users = user_items.size(); int items = user_items[0].size(); int factors = user_items[0].size(); std::cout << users << std::endl; std::list<int> ans = {3}; return ans; } */ //Cui will look like this: // Cui[0] = indptr - pointer into indices/data showing where each row starts // Cui[1] = indices - which columns in that row exist // Cui[2] = data - what's in that column double calculate_loss(double** Cui, double** X, double** Y, double reg, int users, int items, int factors, int nnz) { int loss = 0; int total_confidence = 0; int item_norm = 0; int user_norm = 0; hipblasStatus_t err; hipblasHandle_t handle; double** YtY; const double alpha = 1; const double beta = 0; // do transpose err = hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, items, factors, &alpha, Y[0], items, &beta, Y[0], items, YtY[0], factors); for (int u = 0; u < users; ++u) { double temp = 1.0; double* r; double* Xu = X[u]; err = hipblasDgemv(handle, HIPBLAS_OP_N, items, factors, &alpha, Y[0], items, Xu, 1, &beta, r, 1); int rowStart = Cui[0][u]; int rowEnd = Cui[0][u+1]; int cols[rowEnd-rowStart]; memcpy(cols, &Cui[1][rowStart], rowEnd-rowStart); //int* cols = Cui[1][rowStart:rowEnd]; double vals[rowEnd-rowStart]; memcpy(vals, &Cui[2][rowStart], rowEnd-rowStart); //double* vals = Cui[2][rowStart:rowEnd]; for (int index = 0; index < rowEnd-rowStart; ++index) { int i = cols[index]; double confidence = vals[index]; double* Yi = Y[i]; double d; err = hipblasDdot(handle, factors, Yi, 1, Xu, 1, &d); temp = (confidence - 
1)*d - (2*confidence); err = hipblasDaxpy(handle, factors, &temp, Yi, 1, r, 1); total_confidence += confidence; loss += confidence; } double other_temp; err = hipblasDdot(handle, factors, r, 1, Xu, 1, &other_temp); loss += other_temp; err = hipblasDdot(handle, factors, Xu, 1, Xu, 1, &other_temp); user_norm += other_temp; } for (int i = 0; i < items; ++i) { double* Yi = Y[i]; double other_temp; err = hipblasDdot(handle, factors, Yi, 1, Yi, 1, &other_temp); item_norm += other_temp; } loss += reg * (item_norm + user_norm); return loss / (total_confidence + users * items - nnz); } double rmse(double** user_factors, double** item_factors, int* rows, int* cols, double* ratings, int num_things, int factors) { double error = 0; hipblasHandle_t handle; hipblasStatus_t err; for (int k = 0; k < num_things; ++k) { int uid = rows[k]; int iid = cols[k]; double rating = ratings[k]; double* user = user_factors[uid]; double* item = item_factors[iid]; double guess; err = hipblasDdot(handle, factors, user, 1, item, 1, &guess); error += ::pow((rating-guess), 2); } return std::sqrt(error/num_things); }
ca9e73786425605821a090da8a75bb272b520c46.cu
#include <iostream> #include <cuda_runtime.h> #include "cublas_v2.h" #include "cusparse.h" #include <cmath> #include <vector> #include <fstream> #include <iomanip> // #define M 6 // #define N 5 // #define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1)) /* std::list<int> recommend(int userid, std::vector<std::vector<int> > user_items, std::vector<std::vector<int> > user_factors, std::vector<std::vector<int> > item_factors, int N) { int users = user_items.size(); int items = user_items[0].size(); int factors = user_items[0].size(); std::cout << users << std::endl; std::list<int> ans = {3}; return ans; } */ //Cui will look like this: // Cui[0] = indptr - pointer into indices/data showing where each row starts // Cui[1] = indices - which columns in that row exist // Cui[2] = data - what's in that column double calculate_loss(double** Cui, double** X, double** Y, double reg, int users, int items, int factors, int nnz) { int loss = 0; int total_confidence = 0; int item_norm = 0; int user_norm = 0; cublasStatus_t err; cublasHandle_t handle; double** YtY; const double alpha = 1; const double beta = 0; // do transpose err = cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, items, factors, &alpha, Y[0], items, &beta, Y[0], items, YtY[0], factors); for (int u = 0; u < users; ++u) { double temp = 1.0; double* r; double* Xu = X[u]; err = cublasDgemv(handle, CUBLAS_OP_N, items, factors, &alpha, Y[0], items, Xu, 1, &beta, r, 1); int rowStart = Cui[0][u]; int rowEnd = Cui[0][u+1]; int cols[rowEnd-rowStart]; memcpy(cols, &Cui[1][rowStart], rowEnd-rowStart); //int* cols = Cui[1][rowStart:rowEnd]; double vals[rowEnd-rowStart]; memcpy(vals, &Cui[2][rowStart], rowEnd-rowStart); //double* vals = Cui[2][rowStart:rowEnd]; for (int index = 0; index < rowEnd-rowStart; ++index) { int i = cols[index]; double confidence = vals[index]; double* Yi = Y[i]; double d; err = cublasDdot(handle, factors, Yi, 1, Xu, 1, &d); temp = (confidence - 1)*d - (2*confidence); err = cublasDaxpy(handle, factors, &temp, Yi, 1, 
r, 1); total_confidence += confidence; loss += confidence; } double other_temp; err = cublasDdot(handle, factors, r, 1, Xu, 1, &other_temp); loss += other_temp; err = cublasDdot(handle, factors, Xu, 1, Xu, 1, &other_temp); user_norm += other_temp; } for (int i = 0; i < items; ++i) { double* Yi = Y[i]; double other_temp; err = cublasDdot(handle, factors, Yi, 1, Yi, 1, &other_temp); item_norm += other_temp; } loss += reg * (item_norm + user_norm); return loss / (total_confidence + users * items - nnz); } double rmse(double** user_factors, double** item_factors, int* rows, int* cols, double* ratings, int num_things, int factors) { double error = 0; cublasHandle_t handle; cublasStatus_t err; for (int k = 0; k < num_things; ++k) { int uid = rows[k]; int iid = cols[k]; double rating = ratings[k]; double* user = user_factors[uid]; double* item = item_factors[iid]; double guess; err = cublasDdot(handle, factors, user, 1, item, 1, &guess); error += std::pow((rating-guess), 2); } return std::sqrt(error/num_things); }
e0075c35108877f159d280f609de230d1d6dfc7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> __global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A x B * where A is a (m x k) matrix * where B is a (k x n) matrix * where C is a (m x n) matrix * ********************************************************************/ // INSERT KERNEL CODE HERE int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; float pValue = 0; if (row < m && col < n) { for (int i = 0; i < k; ++i) { pValue += A[row * k + i] * B[i * n + col]; } C[row * n + col] = pValue; } } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = 16; // Use 16x16 thread blocks //INSERT CODE HERE dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 blocksPerGrid((int)ceil((float)n/BLOCK_SIZE),(int)ceil((float)m/BLOCK_SIZE)); // Invoke CUDA kernel ----------------------------------------------------- //INSERT CODE HERE hipLaunchKernelGGL(( mysgemm), 
dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, m,n,k,A,B,C); }
e0075c35108877f159d280f609de230d1d6dfc7b.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> __global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A x B * where A is a (m x k) matrix * where B is a (k x n) matrix * where C is a (m x n) matrix * ********************************************************************/ // INSERT KERNEL CODE HERE int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; float pValue = 0; if (row < m && col < n) { for (int i = 0; i < k; ++i) { pValue += A[row * k + i] * B[i * n + col]; } C[row * n + col] = pValue; } } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = 16; // Use 16x16 thread blocks //INSERT CODE HERE dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 blocksPerGrid((int)ceil((float)n/BLOCK_SIZE),(int)ceil((float)m/BLOCK_SIZE)); // Invoke CUDA kernel ----------------------------------------------------- //INSERT CODE HERE mysgemm<<<blocksPerGrid,threadsPerBlock>>>(m,n,k,A,B,C); }
f89813c032dc32f9b005273ca00b965b26fde73f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file pctdemo_processMandelbrotElement.cu * * CUDA code to calculate the Mandelbrot Set on a GPU. * * Copyright 2011 The MathWorks, Inc. */ /** Work out which piece of the global array this thread should operate on */ __device__ size_t calculateGlobalIndex() { // Which block are we? size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x; // Which thread are we within the block? size_t const localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y; // How big is each block? size_t const threadsPerBlock = blockDim.x*blockDim.y; // Which thread are we overall? return localThreadIdx + globalBlockIndex*threadsPerBlock; } /** The actual Mandelbrot algorithm for a single location */ __device__ double position( double const x0, double const vx0, double const dt ) { int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x ; // Initialise: z = z0 double x = x0; double vx = vx0; x = x + 0.5 * vx * dt; return x; } /** Main entry point. * Works out where the current thread should read/write to global memory * and calls doIterations to do the actual work. */ __global__ void processMandelbrotElement( double * xi, double * yi, double * zi, double * vxi, double * vyi, double * vzi, const double dt ) { // Work out which thread we are size_t const globalThreadIdx = calculateGlobalIndex(); // Get our X and Y coords double const x = xi[globalThreadIdx]; double const y = yi[globalThreadIdx]; double const z = zi[globalThreadIdx]; double const vx = vxi[globalThreadIdx]; double const vy = vyi[globalThreadIdx]; double const vz = vzi[globalThreadIdx]; // Run the itearations on this location xi[globalThreadIdx] = position( x, vx, dt ); yi[globalThreadIdx] = position( y, vy, dt ); zi[globalThreadIdx] = position( z, vz, dt ); }
f89813c032dc32f9b005273ca00b965b26fde73f.cu
/** * @file pctdemo_processMandelbrotElement.cu * * CUDA code to calculate the Mandelbrot Set on a GPU. * * Copyright 2011 The MathWorks, Inc. */ /** Work out which piece of the global array this thread should operate on */ __device__ size_t calculateGlobalIndex() { // Which block are we? size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x; // Which thread are we within the block? size_t const localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y; // How big is each block? size_t const threadsPerBlock = blockDim.x*blockDim.y; // Which thread are we overall? return localThreadIdx + globalBlockIndex*threadsPerBlock; } /** The actual Mandelbrot algorithm for a single location */ __device__ double position( double const x0, double const vx0, double const dt ) { int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x ; // Initialise: z = z0 double x = x0; double vx = vx0; x = x + 0.5 * vx * dt; return x; } /** Main entry point. * Works out where the current thread should read/write to global memory * and calls doIterations to do the actual work. */ __global__ void processMandelbrotElement( double * xi, double * yi, double * zi, double * vxi, double * vyi, double * vzi, const double dt ) { // Work out which thread we are size_t const globalThreadIdx = calculateGlobalIndex(); // Get our X and Y coords double const x = xi[globalThreadIdx]; double const y = yi[globalThreadIdx]; double const z = zi[globalThreadIdx]; double const vx = vxi[globalThreadIdx]; double const vy = vyi[globalThreadIdx]; double const vz = vzi[globalThreadIdx]; // Run the itearations on this location xi[globalThreadIdx] = position( x, vx, dt ); yi[globalThreadIdx] = position( y, vy, dt ); zi[globalThreadIdx] = position( z, vz, dt ); }