hip_filename
stringlengths
5
84
hip_content
stringlengths
79
9.69M
cuda_filename
stringlengths
4
83
cuda_content
stringlengths
19
9.69M
500a8f598c833b264cf248bef5ed03a58691e4df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2019 Stanford University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gnn.h" #include "cuda_helper.h" __global__ void softmax_backward(DATATYPE* logitsGrad, const DATATYPE* labels, const int* mask, int hiddenDim, V_ID numVertices) { CUDA_KERNEL_LOOP(i, hiddenDim * numVertices) { logitsGrad[i] = logitsGrad[i] - labels[i]; int myVtxID = i / hiddenDim; if (mask[myVtxID] != MASK_TRAIN) logitsGrad[i] = 0.0f; } } struct PerfMetrics { float trainLoss; int trainAll, testAll, valAll, trainCorrect, testCorrect, valCorrect; }; __global__ void calc_loss(const DATATYPE* logits, const DATATYPE* labels, const int* mask, PerfMetrics* perf, int hiddenDim, V_ID numVertices) { CUDA_KERNEL_LOOP(v, numVertices) { float maxVal = 0.0f; int trueLabel = -1, myLabel = -1; for (int i = 0; i < hiddenDim; i++) { if (logits[v*hiddenDim+i] > maxVal) { maxVal = logits[v*hiddenDim+i]; myLabel = i; } if (labels[v*hiddenDim+i] > 0.5) { assert(trueLabel == -1); trueLabel = i; } } assert(trueLabel >= 0); if (mask[v] == MASK_TRAIN) { atomicAdd(&(perf->trainLoss), 1 - logits[v*hiddenDim+trueLabel]); atomicAdd(&(perf->trainAll), 1); if (trueLabel == myLabel) atomicAdd(&(perf->trainCorrect), 1); } else if (mask[v] == MASK_VAL) { atomicAdd(&(perf->valAll), 1); if (trueLabel == myLabel) atomicAdd(&(perf->valCorrect), 1); } else if (mask[v] == MASK_TEST) { atomicAdd(&(perf->testAll), 1); if (trueLabel == 
myLabel) atomicAdd(&(perf->testCorrect), 1); } } } __host__ void SoftmaxCrossEntropy::backward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { assert(regions.size() == 3 || regions.size() == 4); assert(regions.size() == task->regions.size()); const SoftmaxCrossEntropy* op = (SoftmaxCrossEntropy*) task->args; // assert the three inputs need to reset gradient assert(op->resetInputGrads[0]); assert(op->resetInputGrads[1]); assert(op->resetInputGrads[2]); ResourceManager* manager = *((ResourceManager**) task->local_args); assert(manager->proc_id == task->current_proc.id); manager->reset(); TensorAccessorR<DATATYPE, 2> accLogits( regions[0], task->regions[0], FID_DATA, ctx, runtime, manager); TensorAccessorR<DATATYPE, 2> accLabels( regions[1], task->regions[1], FID_DATA, ctx, runtime, manager); TensorAccessorW<DATATYPE, 2> accLogitsGrad( regions[2], task->regions[2], FID_DATA, ctx, runtime, manager, false/*readOutput*/); assert(accLogits.memory.kind() == Memory::Z_COPY_MEM); assert(accLabels.memory.kind() == Memory::Z_COPY_MEM); assert(accLogitsGrad.memory.kind() == Memory::Z_COPY_MEM); V_ID rowLeft = accLogits.rect.lo[1], rowRight = accLogits.rect.hi[1]; int hiddenDim = accLogits.rect.hi[0] - accLogits.rect.lo[0] + 1; if (regions.size() == 4) { TensorAccessorR<int, 2> accMask( regions[3], task->regions[3], FID_DATA, ctx, runtime, manager); assert(accLogits.rect == accLabels.rect); assert(accLogits.rect == accLogitsGrad.rect); assert(accMask.rect.lo[0] == accMask.rect.hi[0]); assert(accMask.rect.lo[1] == rowLeft); assert(accMask.rect.hi[1] == rowRight); cudnnTensorDescriptor_t inputDesc; checkCUDNN(cudnnCreateTensorDescriptor(&inputDesc)); int dims[] = {(int)(rowRight - rowLeft + 1), hiddenDim, 1, 1}; int strides[] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1}; checkCUDNN(cudnnSetTensorNdDescriptor(inputDesc, CUDNN_DATA_FLOAT, 4, dims, strides)); float alpha = 1.0f, beta = 0.0f; 
checkCUDNN(cudnnSoftmaxForward(manager->dnn, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, inputDesc, accLogits.fbCache, &beta, inputDesc, accLogitsGrad.fbCache)); // Calculate loss PerfMetrics* perf; PerfMetrics perfZC; perfZC.trainLoss = 0.0f; perfZC.trainCorrect = perfZC.trainAll = 0; perfZC.testCorrect = perfZC.testAll = 0; perfZC.valCorrect = perfZC.valAll = 0; checkCUDA(hipMalloc(&perf, sizeof(PerfMetrics))); checkCUDA(hipMemcpy(perf, &perfZC, sizeof(PerfMetrics), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( calc_loss), dim3(GET_BLOCKS(rowRight-rowLeft+1)), dim3(CUDA_NUM_THREADS), 0, 0, accLogitsGrad.fbCache, accLabels.fbCache, accMask.fbCache, perf, hiddenDim, rowRight - rowLeft + 1); checkCUDA(hipMemcpy(&perfZC, perf, sizeof(PerfMetrics), hipMemcpyDeviceToHost)); std::string modeInfo = (op->mode == MD_MODE_TRAIN) ? "[TRAIN]" : " [INFER]"; if (op->mode == MD_MODE_INFER) { fprintf(stderr, "%s[%d] train_loss: %.4lf train_accuracy: %.2lf\%(%d/%d) val_accuracy: %.2lf\%(%d/%d) test_accuracy: %.2lf\%(%d/%d)\n", modeInfo.c_str(), op->epoch_num, perfZC.trainLoss, perfZC.trainCorrect * 100.0f / perfZC.trainAll, perfZC.trainCorrect, perfZC.trainAll, perfZC.valCorrect * 100.0f / perfZC.valAll, perfZC.valCorrect, perfZC.valAll, perfZC.testCorrect * 100.0f / perfZC.testAll, perfZC.testCorrect, perfZC.testAll); printf("%s[%d] train_loss: %.4lf train_accuracy: %.2lf\%(%d/%d) val_accuracy: %.2lf\%(%d/%d) test_accuracy: %.2lf\%(%d/%d)\n", modeInfo.c_str(), op->epoch_num, perfZC.trainLoss, perfZC.trainCorrect * 100.0f / perfZC.trainAll, perfZC.trainCorrect, perfZC.trainAll, perfZC.valCorrect * 100.0f / perfZC.valAll, perfZC.valCorrect, perfZC.valAll, perfZC.testCorrect * 100.0f / perfZC.testAll, perfZC.testCorrect, perfZC.testAll); } // Calculate loss hipLaunchKernelGGL(( softmax_backward), dim3(GET_BLOCKS(accLogits.rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0, accLogitsGrad.fbCache, accLabels.fbCache, accMask.fbCache, hiddenDim, rowRight - rowLeft + 1); } 
else { // When we don't have traing/val/test masks assert(false); } checkCUDA(hipMemcpy(accLogitsGrad.ptr, accLogitsGrad.fbCache, accLogitsGrad.rect.volume() * sizeof(DATATYPE), hipMemcpyDeviceToHost)); //for (int i = 0; i < 8; i++) // for (int j = 0; j < 8; j++) // printf("[Softmax] input[%d][%d]: %.4lf\n", i, j, accLogits.ptr[i * hiddenDim + j]); //for (int i = 0; i < 8; i++) // for (int j = 0; j < 8; j++) // printf("LogitsBack[%d][%d]: %.4lf\n", i, j, accLogitsGrad.ptr[i * hiddenDim + j]); checkCUDA(hipDeviceSynchronize()); }
500a8f598c833b264cf248bef5ed03a58691e4df.cu
/* Copyright 2019 Stanford University * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gnn.h" #include "cuda_helper.h" __global__ void softmax_backward(DATATYPE* logitsGrad, const DATATYPE* labels, const int* mask, int hiddenDim, V_ID numVertices) { CUDA_KERNEL_LOOP(i, hiddenDim * numVertices) { logitsGrad[i] = logitsGrad[i] - labels[i]; int myVtxID = i / hiddenDim; if (mask[myVtxID] != MASK_TRAIN) logitsGrad[i] = 0.0f; } } struct PerfMetrics { float trainLoss; int trainAll, testAll, valAll, trainCorrect, testCorrect, valCorrect; }; __global__ void calc_loss(const DATATYPE* logits, const DATATYPE* labels, const int* mask, PerfMetrics* perf, int hiddenDim, V_ID numVertices) { CUDA_KERNEL_LOOP(v, numVertices) { float maxVal = 0.0f; int trueLabel = -1, myLabel = -1; for (int i = 0; i < hiddenDim; i++) { if (logits[v*hiddenDim+i] > maxVal) { maxVal = logits[v*hiddenDim+i]; myLabel = i; } if (labels[v*hiddenDim+i] > 0.5) { assert(trueLabel == -1); trueLabel = i; } } assert(trueLabel >= 0); if (mask[v] == MASK_TRAIN) { atomicAdd(&(perf->trainLoss), 1 - logits[v*hiddenDim+trueLabel]); atomicAdd(&(perf->trainAll), 1); if (trueLabel == myLabel) atomicAdd(&(perf->trainCorrect), 1); } else if (mask[v] == MASK_VAL) { atomicAdd(&(perf->valAll), 1); if (trueLabel == myLabel) atomicAdd(&(perf->valCorrect), 1); } else if (mask[v] == MASK_TEST) { atomicAdd(&(perf->testAll), 1); if (trueLabel == myLabel) atomicAdd(&(perf->testCorrect), 1); } } } __host__ void 
SoftmaxCrossEntropy::backward_task(const Task *task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { assert(regions.size() == 3 || regions.size() == 4); assert(regions.size() == task->regions.size()); const SoftmaxCrossEntropy* op = (SoftmaxCrossEntropy*) task->args; // assert the three inputs need to reset gradient assert(op->resetInputGrads[0]); assert(op->resetInputGrads[1]); assert(op->resetInputGrads[2]); ResourceManager* manager = *((ResourceManager**) task->local_args); assert(manager->proc_id == task->current_proc.id); manager->reset(); TensorAccessorR<DATATYPE, 2> accLogits( regions[0], task->regions[0], FID_DATA, ctx, runtime, manager); TensorAccessorR<DATATYPE, 2> accLabels( regions[1], task->regions[1], FID_DATA, ctx, runtime, manager); TensorAccessorW<DATATYPE, 2> accLogitsGrad( regions[2], task->regions[2], FID_DATA, ctx, runtime, manager, false/*readOutput*/); assert(accLogits.memory.kind() == Memory::Z_COPY_MEM); assert(accLabels.memory.kind() == Memory::Z_COPY_MEM); assert(accLogitsGrad.memory.kind() == Memory::Z_COPY_MEM); V_ID rowLeft = accLogits.rect.lo[1], rowRight = accLogits.rect.hi[1]; int hiddenDim = accLogits.rect.hi[0] - accLogits.rect.lo[0] + 1; if (regions.size() == 4) { TensorAccessorR<int, 2> accMask( regions[3], task->regions[3], FID_DATA, ctx, runtime, manager); assert(accLogits.rect == accLabels.rect); assert(accLogits.rect == accLogitsGrad.rect); assert(accMask.rect.lo[0] == accMask.rect.hi[0]); assert(accMask.rect.lo[1] == rowLeft); assert(accMask.rect.hi[1] == rowRight); cudnnTensorDescriptor_t inputDesc; checkCUDNN(cudnnCreateTensorDescriptor(&inputDesc)); int dims[] = {(int)(rowRight - rowLeft + 1), hiddenDim, 1, 1}; int strides[] = {dims[1] * dims[2] * dims[3], dims[2] * dims[3], dims[3], 1}; checkCUDNN(cudnnSetTensorNdDescriptor(inputDesc, CUDNN_DATA_FLOAT, 4, dims, strides)); float alpha = 1.0f, beta = 0.0f; checkCUDNN(cudnnSoftmaxForward(manager->dnn, CUDNN_SOFTMAX_ACCURATE, 
CUDNN_SOFTMAX_MODE_INSTANCE, &alpha, inputDesc, accLogits.fbCache, &beta, inputDesc, accLogitsGrad.fbCache)); // Calculate loss PerfMetrics* perf; PerfMetrics perfZC; perfZC.trainLoss = 0.0f; perfZC.trainCorrect = perfZC.trainAll = 0; perfZC.testCorrect = perfZC.testAll = 0; perfZC.valCorrect = perfZC.valAll = 0; checkCUDA(cudaMalloc(&perf, sizeof(PerfMetrics))); checkCUDA(cudaMemcpy(perf, &perfZC, sizeof(PerfMetrics), cudaMemcpyHostToDevice)); calc_loss<<<GET_BLOCKS(rowRight-rowLeft+1), CUDA_NUM_THREADS>>>( accLogitsGrad.fbCache, accLabels.fbCache, accMask.fbCache, perf, hiddenDim, rowRight - rowLeft + 1); checkCUDA(cudaMemcpy(&perfZC, perf, sizeof(PerfMetrics), cudaMemcpyDeviceToHost)); std::string modeInfo = (op->mode == MD_MODE_TRAIN) ? "[TRAIN]" : " [INFER]"; if (op->mode == MD_MODE_INFER) { fprintf(stderr, "%s[%d] train_loss: %.4lf train_accuracy: %.2lf\%(%d/%d) val_accuracy: %.2lf\%(%d/%d) test_accuracy: %.2lf\%(%d/%d)\n", modeInfo.c_str(), op->epoch_num, perfZC.trainLoss, perfZC.trainCorrect * 100.0f / perfZC.trainAll, perfZC.trainCorrect, perfZC.trainAll, perfZC.valCorrect * 100.0f / perfZC.valAll, perfZC.valCorrect, perfZC.valAll, perfZC.testCorrect * 100.0f / perfZC.testAll, perfZC.testCorrect, perfZC.testAll); printf("%s[%d] train_loss: %.4lf train_accuracy: %.2lf\%(%d/%d) val_accuracy: %.2lf\%(%d/%d) test_accuracy: %.2lf\%(%d/%d)\n", modeInfo.c_str(), op->epoch_num, perfZC.trainLoss, perfZC.trainCorrect * 100.0f / perfZC.trainAll, perfZC.trainCorrect, perfZC.trainAll, perfZC.valCorrect * 100.0f / perfZC.valAll, perfZC.valCorrect, perfZC.valAll, perfZC.testCorrect * 100.0f / perfZC.testAll, perfZC.testCorrect, perfZC.testAll); } // Calculate loss softmax_backward<<<GET_BLOCKS(accLogits.rect.volume()), CUDA_NUM_THREADS>>>( accLogitsGrad.fbCache, accLabels.fbCache, accMask.fbCache, hiddenDim, rowRight - rowLeft + 1); } else { // When we don't have traing/val/test masks assert(false); } checkCUDA(cudaMemcpy(accLogitsGrad.ptr, accLogitsGrad.fbCache, 
accLogitsGrad.rect.volume() * sizeof(DATATYPE), cudaMemcpyDeviceToHost)); //for (int i = 0; i < 8; i++) // for (int j = 0; j < 8; j++) // printf("[Softmax] input[%d][%d]: %.4lf\n", i, j, accLogits.ptr[i * hiddenDim + j]); //for (int i = 0; i < 8; i++) // for (int j = 0; j < 8; j++) // printf("LogitsBack[%d][%d]: %.4lf\n", i, j, accLogitsGrad.ptr[i * hiddenDim + j]); checkCUDA(cudaDeviceSynchronize()); }
10fdf899863bdc4628f86377875a74fda2172e2f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "special.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_out = NULL; hipMalloc(&d_out, XSIZE*YSIZE); float *d_in = NULL; hipMalloc(&d_in, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( special), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( special), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( special), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << 
','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
10fdf899863bdc4628f86377875a74fda2172e2f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "special.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *d_out = NULL; cudaMalloc(&d_out, XSIZE*YSIZE); float *d_in = NULL; cudaMalloc(&d_in, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); special<<<gridBlock,threadBlock>>>(d_out,d_in,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { special<<<gridBlock,threadBlock>>>(d_out,d_in,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { special<<<gridBlock,threadBlock>>>(d_out,d_in,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9b5d709571c0ff547843502a86d02bf5d985d47a.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/execution_policy.h> #include <algorithm> #include <cstdlib> #include <cstdio> #include <string> #include <cstring> #include <iostream> #include <iomanip> #include <fstream> #include <stdio.h> // The std::chrono namespace provides timer functions in C++ #include <chrono> // std::ratio provides easy conversions between metric units #include <ratio> // // Provide some namespace shortcuts // using std::cout; // using std::chrono::high_resolution_clock; // using std::chrono::duration; //using namespace std; struct f_mult { template <typename Tuple> __host__ __device__ float operator()(Tuple v) { return thrust::get<0>(v) * thrust::get<1>(v); } }; struct f_nextx { template <typename Tuple> __host__ __device__ float operator()(Tuple v) { return ((thrust::get<0>(v) - thrust::get<1>(v)) / thrust::get<2>(v)) + thrust::get<3>(v); //nextX[i] = (b[i] - sum) / A[i][i]; } }; struct divF: thrust::unary_function<int, int> { int n; divF(int n_) : n(n_) {} __host__ __device__ int operator()(int idx) { return idx / n; } }; struct modF: thrust::unary_function<int, int> { int n; modF(int n_) : n(n_) {} __host__ __device__ int operator()(int idx) { return idx % n; } }; // struct diag_index : public thrust::unary_function<int,int> // { // diag_index(int rows) : rows(rows){} // __host__ __device__ // int operator()(const int index) const // { // return (index*rows + (index%rows)); // } // const int rows; // }; struct dmF: thrust::unary_function<int, int> { int n; dmF(int n_) : n(n_) {} __host__ __device__ int operator()(int i) { return i*n+i; } }; typedef 
thrust::counting_iterator<int> countIt; typedef thrust::transform_iterator<divF, countIt> columnIt; typedef thrust::transform_iterator<modF, countIt> rowIt; typedef thrust::transform_iterator<dmF, countIt> diagIt; void solve(thrust::device_vector<float>& dx, thrust::device_vector<float>& dA, thrust::device_vector<float>& db, thrust::device_vector<float>& dnextX, int size, thrust::device_vector<float>& temp, thrust::device_vector<int>&outkey, thrust::device_vector<float>&sum) { // std::cout <<"dA= "; // for (int i = 0; i<size*size; i++) { // //printf("%f ", v[i]); // std::cout << dA[i]<<" "; // } // //printf("\n"); // std::cout << "\n"; columnIt cv_begin = thrust::make_transform_iterator(thrust::make_counting_iterator(0), divF(size)); columnIt cv_end = cv_begin + (size*size); rowIt rv_begin = thrust::make_transform_iterator(thrust::make_counting_iterator(0), modF(size)); rowIt rv_end = rv_begin + (size*size); diagIt dg_begin = thrust::make_transform_iterator(thrust::make_counting_iterator(0), dmF(size)); diagIt dg_end = dg_begin + (size); // diagIt dg_begin = thrust::make_transform_iterator(thrust::make_counting_iterator(0),diag_index(size)); // diagIt dg_end = dg_begin + (size*size); //thrust::device_vector<float> temp(size*size); thrust::transform(make_zip_iterator( make_tuple( dA.begin(), thrust::make_permutation_iterator(dx.begin(),rv_begin) ) ), make_zip_iterator( make_tuple( dA.end(), thrust::make_permutation_iterator(dx.end(),rv_end) ) ), temp.begin(), f_mult()); // thrust::device_vector<int> outkey(size); // thrust::device_vector<float> sum(size); thrust::reduce_by_key(cv_begin, cv_end, temp.begin(), outkey.begin(), sum.begin()); // thrust::transform(v.begin(), v.end(), sum.begin(), v.begin(), thrust::plus<float>()); // std::cout <<"sum= "; // for (int i = 0; i<size; i++) { // //printf("%f ", v[i]); // std::cout << sum[i]<<" "; // } // //printf("\n"); // std::cout << "\n"; // thrust::transform( // make_zip_iterator( // make_tuple( // // dA.begin(), // 
thrust::make_permutation_iterator(db.begin(),rv_begin), // thrust::make_permutation_iterator(sum.begin(),rv_begin), // thrust::make_permutation_iterator(dA.begin(),dg_begin), // thrust::make_permutation_iterator(dx.begin(),rv_begin) // ) // ), // make_zip_iterator( // make_tuple( // thrust::make_permutation_iterator(db.end(),rv_end), // thrust::make_permutation_iterator(sum.end(),rv_end), // thrust::make_permutation_iterator(dA.end(),dg_end), // thrust::make_permutation_iterator(dx.end(),rv_end) // ) // ), // dnextX.begin(), // f_nextx()); thrust::transform( make_zip_iterator( make_tuple( // dA.begin(), db.begin(), sum.begin(), thrust::make_permutation_iterator(dA.begin(),dg_begin), dx.begin() ) ), make_zip_iterator( make_tuple( db.end(), sum.end(), thrust::make_permutation_iterator(dA.end(),dg_end), dx.end() ) ), dnextX.begin(), f_nextx()); //nextX[i] = ((b[i] - sum) / A[i][i]) + x[i]; // std::cout <<"nextX= "; // for (int i = 0; i<size; i++) { // //printf("%f ", v[i]); // std::cout << dnextX[i]<<" "; // } // //printf("\n"); // std::cout << "\n"; } int main(int argc, char ** argv) { int maxIterations = 100; int size = std::stoi(argv[1], 0, 10); std::cout << "size="<<size<<"\n"; thrust::host_vector<float> A_flat(size*size); thrust::host_vector<float> hb(size); thrust::host_vector<float> hx(size); thrust::host_vector<float> hnextX(size); int n = size; std::string rfile = argv[2]; std::ifstream fin(rfile); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { fin >> A_flat[i*n+j]; //cout << A[i][j] << " "; } //cout << endl; } for (int i = 0; i < n; i++) { fin >> hb[i]; } fin.close(); std::cout << "Read benchmark file "<<rfile<<std::endl; // //float * A_flat = new float [size*size]; // for (int i = 0; i< size*size; i++) { // A_flat[i] = i; // } int size2=size*size; thrust::device_vector<float> dA(size2); thrust::device_vector<float> dx(size); thrust::device_vector<float> db(size); thrust::device_vector<float> dnextX(size); thrust::device_vector<float> 
temp(size*size); thrust::device_vector<int> outkey(size); thrust::device_vector<float> sum(size); //thrust::fill(dA.begin(), dA.end(), A_flat); //thrust::copy(dA.begin(), dA.end(), A_flat); dA = A_flat; db = hb; // thrust::fill(db.begin(), db.end(), 3); thrust::fill(dx.begin(), dx.end(), 0); thrust::fill(dnextX.begin(), dnextX.end(), 0); hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); int count = 1; for (; (count < maxIterations) ; count++) { if (count % 2) { // odd solve(dnextX, dA, db, dx, size, temp, outkey, sum); } else { // even solve(dx, dA, db, dnextX, size, temp, outkey, sum); } } hipEventRecord(stop); hipEventSynchronize(stop); // Get the elapsed time in milliseconds float ms = 0; hipEventElapsedTime(&ms, start, stop); std::cout << std::endl << "Iterations:" << count << std::endl; printf("%f\n", ms); hx = dx; hnextX = dnextX; std::string wfile = argv[3]; std::ofstream fout(wfile); for (int i = 0; i < n; i++) { fout << std::fixed<<hx[i] << " "; //cout << x[i] << " "; } fout << std::endl; fout.close(); float * c = new float [size]; float maxError = 0; float total_err = 0; for(int i = 0; i < size; i++) { c[i] = 0; for(int j = 0; j < size; j++) { c[i] += A_flat[i*size+j] * hx[j]; } maxError = fmax(maxError, fabs(c[i] - hb[i])); total_err += fabs(c[i] - hb[i]); } total_err = total_err / size; std::cout << "\n==== max error: "<<maxError<<"\n"; std::cout << "==== avg error: "<<total_err<<"\n"; delete[] c; return 0; }
9b5d709571c0ff547843502a86d02bf5d985d47a.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/reduce.h> #include <thrust/execution_policy.h> #include <algorithm> #include <cstdlib> #include <cstdio> #include <string> #include <cstring> #include <iostream> #include <iomanip> #include <fstream> #include <stdio.h> // The std::chrono namespace provides timer functions in C++ #include <chrono> // std::ratio provides easy conversions between metric units #include <ratio> // // Provide some namespace shortcuts // using std::cout; // using std::chrono::high_resolution_clock; // using std::chrono::duration; //using namespace std; struct f_mult { template <typename Tuple> __host__ __device__ float operator()(Tuple v) { return thrust::get<0>(v) * thrust::get<1>(v); } }; struct f_nextx { template <typename Tuple> __host__ __device__ float operator()(Tuple v) { return ((thrust::get<0>(v) - thrust::get<1>(v)) / thrust::get<2>(v)) + thrust::get<3>(v); //nextX[i] = (b[i] - sum) / A[i][i]; } }; struct divF: thrust::unary_function<int, int> { int n; divF(int n_) : n(n_) {} __host__ __device__ int operator()(int idx) { return idx / n; } }; struct modF: thrust::unary_function<int, int> { int n; modF(int n_) : n(n_) {} __host__ __device__ int operator()(int idx) { return idx % n; } }; // struct diag_index : public thrust::unary_function<int,int> // { // diag_index(int rows) : rows(rows){} // __host__ __device__ // int operator()(const int index) const // { // return (index*rows + (index%rows)); // } // const int rows; // }; struct dmF: thrust::unary_function<int, int> { int n; dmF(int n_) : n(n_) {} __host__ __device__ int operator()(int i) { return i*n+i; } }; typedef thrust::counting_iterator<int> countIt; typedef thrust::transform_iterator<divF, 
countIt> columnIt; typedef thrust::transform_iterator<modF, countIt> rowIt; typedef thrust::transform_iterator<dmF, countIt> diagIt; void solve(thrust::device_vector<float>& dx, thrust::device_vector<float>& dA, thrust::device_vector<float>& db, thrust::device_vector<float>& dnextX, int size, thrust::device_vector<float>& temp, thrust::device_vector<int>&outkey, thrust::device_vector<float>&sum) { // std::cout <<"dA= "; // for (int i = 0; i<size*size; i++) { // //printf("%f ", v[i]); // std::cout << dA[i]<<" "; // } // //printf("\n"); // std::cout << "\n"; columnIt cv_begin = thrust::make_transform_iterator(thrust::make_counting_iterator(0), divF(size)); columnIt cv_end = cv_begin + (size*size); rowIt rv_begin = thrust::make_transform_iterator(thrust::make_counting_iterator(0), modF(size)); rowIt rv_end = rv_begin + (size*size); diagIt dg_begin = thrust::make_transform_iterator(thrust::make_counting_iterator(0), dmF(size)); diagIt dg_end = dg_begin + (size); // diagIt dg_begin = thrust::make_transform_iterator(thrust::make_counting_iterator(0),diag_index(size)); // diagIt dg_end = dg_begin + (size*size); //thrust::device_vector<float> temp(size*size); thrust::transform(make_zip_iterator( make_tuple( dA.begin(), thrust::make_permutation_iterator(dx.begin(),rv_begin) ) ), make_zip_iterator( make_tuple( dA.end(), thrust::make_permutation_iterator(dx.end(),rv_end) ) ), temp.begin(), f_mult()); // thrust::device_vector<int> outkey(size); // thrust::device_vector<float> sum(size); thrust::reduce_by_key(cv_begin, cv_end, temp.begin(), outkey.begin(), sum.begin()); // thrust::transform(v.begin(), v.end(), sum.begin(), v.begin(), thrust::plus<float>()); // std::cout <<"sum= "; // for (int i = 0; i<size; i++) { // //printf("%f ", v[i]); // std::cout << sum[i]<<" "; // } // //printf("\n"); // std::cout << "\n"; // thrust::transform( // make_zip_iterator( // make_tuple( // // dA.begin(), // thrust::make_permutation_iterator(db.begin(),rv_begin), // 
thrust::make_permutation_iterator(sum.begin(),rv_begin), // thrust::make_permutation_iterator(dA.begin(),dg_begin), // thrust::make_permutation_iterator(dx.begin(),rv_begin) // ) // ), // make_zip_iterator( // make_tuple( // thrust::make_permutation_iterator(db.end(),rv_end), // thrust::make_permutation_iterator(sum.end(),rv_end), // thrust::make_permutation_iterator(dA.end(),dg_end), // thrust::make_permutation_iterator(dx.end(),rv_end) // ) // ), // dnextX.begin(), // f_nextx()); thrust::transform( make_zip_iterator( make_tuple( // dA.begin(), db.begin(), sum.begin(), thrust::make_permutation_iterator(dA.begin(),dg_begin), dx.begin() ) ), make_zip_iterator( make_tuple( db.end(), sum.end(), thrust::make_permutation_iterator(dA.end(),dg_end), dx.end() ) ), dnextX.begin(), f_nextx()); //nextX[i] = ((b[i] - sum) / A[i][i]) + x[i]; // std::cout <<"nextX= "; // for (int i = 0; i<size; i++) { // //printf("%f ", v[i]); // std::cout << dnextX[i]<<" "; // } // //printf("\n"); // std::cout << "\n"; } int main(int argc, char ** argv) { int maxIterations = 100; int size = std::stoi(argv[1], 0, 10); std::cout << "size="<<size<<"\n"; thrust::host_vector<float> A_flat(size*size); thrust::host_vector<float> hb(size); thrust::host_vector<float> hx(size); thrust::host_vector<float> hnextX(size); int n = size; std::string rfile = argv[2]; std::ifstream fin(rfile); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { fin >> A_flat[i*n+j]; //cout << A[i][j] << " "; } //cout << endl; } for (int i = 0; i < n; i++) { fin >> hb[i]; } fin.close(); std::cout << "Read benchmark file "<<rfile<<std::endl; // //float * A_flat = new float [size*size]; // for (int i = 0; i< size*size; i++) { // A_flat[i] = i; // } int size2=size*size; thrust::device_vector<float> dA(size2); thrust::device_vector<float> dx(size); thrust::device_vector<float> db(size); thrust::device_vector<float> dnextX(size); thrust::device_vector<float> temp(size*size); thrust::device_vector<int> outkey(size); 
thrust::device_vector<float> sum(size); //thrust::fill(dA.begin(), dA.end(), A_flat); //thrust::copy(dA.begin(), dA.end(), A_flat); dA = A_flat; db = hb; // thrust::fill(db.begin(), db.end(), 3); thrust::fill(dx.begin(), dx.end(), 0); thrust::fill(dnextX.begin(), dnextX.end(), 0); cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); int count = 1; for (; (count < maxIterations) ; count++) { if (count % 2) { // odd solve(dnextX, dA, db, dx, size, temp, outkey, sum); } else { // even solve(dx, dA, db, dnextX, size, temp, outkey, sum); } } cudaEventRecord(stop); cudaEventSynchronize(stop); // Get the elapsed time in milliseconds float ms = 0; cudaEventElapsedTime(&ms, start, stop); std::cout << std::endl << "Iterations:" << count << std::endl; printf("%f\n", ms); hx = dx; hnextX = dnextX; std::string wfile = argv[3]; std::ofstream fout(wfile); for (int i = 0; i < n; i++) { fout << std::fixed<<hx[i] << " "; //cout << x[i] << " "; } fout << std::endl; fout.close(); float * c = new float [size]; float maxError = 0; float total_err = 0; for(int i = 0; i < size; i++) { c[i] = 0; for(int j = 0; j < size; j++) { c[i] += A_flat[i*size+j] * hx[j]; } maxError = fmax(maxError, fabs(c[i] - hb[i])); total_err += fabs(c[i] - hb[i]); } total_err = total_err / size; std::cout << "\n==== max error: "<<maxError<<"\n"; std::cout << "==== avg error: "<<total_err<<"\n"; delete[] c; return 0; }
ad9282a5538baf689697b51d3db722488d99cdd1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "projectionProfileHorizontalCuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const uint8_t *image = NULL; hipMalloc(&image, XSIZE*YSIZE); uint32_t rowSize = XSIZE*YSIZE; uint32_t width = XSIZE; uint32_t height = YSIZE; uint32_t *projection = NULL; hipMalloc(&projection, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( projectionProfileHorizontalCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, image,rowSize,width,height,projection); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( projectionProfileHorizontalCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, image,rowSize,width,height,projection); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( projectionProfileHorizontalCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, 
image,rowSize,width,height,projection); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ad9282a5538baf689697b51d3db722488d99cdd1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "projectionProfileHorizontalCuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const uint8_t *image = NULL; cudaMalloc(&image, XSIZE*YSIZE); uint32_t rowSize = XSIZE*YSIZE; uint32_t width = XSIZE; uint32_t height = YSIZE; uint32_t *projection = NULL; cudaMalloc(&projection, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); projectionProfileHorizontalCuda<<<gridBlock,threadBlock>>>(image,rowSize,width,height,projection); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { projectionProfileHorizontalCuda<<<gridBlock,threadBlock>>>(image,rowSize,width,height,projection); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { projectionProfileHorizontalCuda<<<gridBlock,threadBlock>>>(image,rowSize,width,height,projection); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4d852a8ea1ebb34afb40b4d3184b9afed76c161c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <fstream> #include <time.h> #include <iostream> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> __global__ void AplusB(int *ret, int a, int b) { ret[threadIdx.x] = a + b + threadIdx.x; } int main() { int *ret; hipMallocManaged(&ret, 1000 * sizeof(int)); hipLaunchKernelGGL(( AplusB), dim3(1), dim3(1000) , 0, 0, ret, 10, 100); hipDeviceSynchronize(); for(int i=0; i<1000; i++) printf("%d: A+B = %d\n", i, ret[i]); hipFree(ret); return 0; }
4d852a8ea1ebb34afb40b4d3184b9afed76c161c.cu
#include <cmath> #include <fstream> #include <time.h> #include <iostream> #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> __global__ void AplusB(int *ret, int a, int b) { ret[threadIdx.x] = a + b + threadIdx.x; } int main() { int *ret; cudaMallocManaged(&ret, 1000 * sizeof(int)); AplusB<<< 1, 1000 >>>(ret, 10, 100); cudaDeviceSynchronize(); for(int i=0; i<1000; i++) printf("%d: A+B = %d\n", i, ret[i]); cudaFree(ret); return 0; }
f8fc43a76cc12e4a7a3167631f14506a0a56c0fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_zerores_kernel; int xdim0_zerores_kernel_h = -1; __constant__ int xdim1_zerores_kernel; int xdim1_zerores_kernel_h = -1; __constant__ int xdim2_zerores_kernel; int xdim2_zerores_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #define OPS_ACC0(x) (x) #define OPS_ACC1(x) (x) #define OPS_ACC2(x) (x) // user function __device__ void zerores_kernel_gpu(double *rho_res, double *rhou_res, double *rhoE_res) { rho_res[OPS_ACC0(0)] = 0.0; rhou_res[OPS_ACC1(0)] = 0.0; rhoE_res[OPS_ACC2(0)] = 0.0; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 __global__ void ops_zerores_kernel(double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, int size0) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1; arg1 += idx_x * 1 * 1; arg2 += idx_x * 1 * 1; if (idx_x < size0) { zerores_kernel_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_zerores_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_zerores_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2, "zerores_kernel"); OPS_kernels[2].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[1]; int end[1]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 1; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 
* n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 1; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; if (xdim0 != xdim0_zerores_kernel_h || xdim1 != xdim1_zerores_kernel_h || xdim2 != xdim2_zerores_kernel_h) { hipMemcpyToSymbol(xdim0_zerores_kernel, &xdim0, sizeof(int)); xdim0_zerores_kernel_h = xdim0; hipMemcpyToSymbol(xdim1_zerores_kernel, &xdim1, sizeof(int)); xdim1_zerores_kernel_h = xdim1; hipMemcpyToSymbol(xdim2_zerores_kernel, &xdim2, sizeof(int)); xdim2_zerores_kernel_h = xdim2; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1); dim3 tblock(OPS_block_size_x, 1, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? 
args[2].dat->type_size : args[2].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); p_a[2] = (char *)args[2].data_d + base2; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[2].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_zerores_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[2].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[2].mpi_time += t2 - t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg2); } } #ifdef OPS_LAZY void ops_par_loop_zerores_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for (int i = 0; i < 2; i++) { desc->range[i] = range[i]; 
desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->function = ops_par_loop_zerores_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(2, "zerores_kernel"); } ops_enqueue_kernel(desc); } #endif
f8fc43a76cc12e4a7a3167631f14506a0a56c0fb.cu
// // auto-generated by ops.py // __constant__ int xdim0_zerores_kernel; int xdim0_zerores_kernel_h = -1; __constant__ int xdim1_zerores_kernel; int xdim1_zerores_kernel_h = -1; __constant__ int xdim2_zerores_kernel; int xdim2_zerores_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #define OPS_ACC0(x) (x) #define OPS_ACC1(x) (x) #define OPS_ACC2(x) (x) // user function __device__ void zerores_kernel_gpu(double *rho_res, double *rhou_res, double *rhoE_res) { rho_res[OPS_ACC0(0)] = 0.0; rhou_res[OPS_ACC1(0)] = 0.0; rhoE_res[OPS_ACC2(0)] = 0.0; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 __global__ void ops_zerores_kernel(double *__restrict arg0, double *__restrict arg1, double *__restrict arg2, int size0) { int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1; arg1 += idx_x * 1 * 1; arg2 += idx_x * 1 * 1; if (idx_x < size0) { zerores_kernel_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_zerores_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_zerores_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2, "zerores_kernel"); OPS_kernels[2].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[1]; int end[1]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 1; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == 
MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 1; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; if (xdim0 != xdim0_zerores_kernel_h || xdim1 != xdim1_zerores_kernel_h || xdim2 != xdim2_zerores_kernel_h) { cudaMemcpyToSymbol(xdim0_zerores_kernel, &xdim0, sizeof(int)); xdim0_zerores_kernel_h = xdim0; cudaMemcpyToSymbol(xdim1_zerores_kernel, &xdim1, sizeof(int)); xdim1_zerores_kernel_h = xdim1; cudaMemcpyToSymbol(xdim2_zerores_kernel, &xdim2, sizeof(int)); xdim2_zerores_kernel_h = xdim2; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, 1, 1); dim3 tblock(OPS_block_size_x, 1, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? 
args[2].dat->type_size : args[2].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); p_a[2] = (char *)args[2].data_d + base2; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[2].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_zerores_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1], (double *)p_a[2], x_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[2].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[2], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[2].mpi_time += t2 - t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg2); } } #ifdef OPS_LAZY void ops_par_loop_zerores_kernel(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for (int i = 0; i < 2; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; 
desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->function = ops_par_loop_zerores_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(2, "zerores_kernel"); } ops_enqueue_kernel(desc); } #endif
7803f7b1c2e892372f329f4745ff809f5f774b6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15) { for (int i=0; i < var_1; ++i) { if (comp == var_2 * (+1.4715E-42f - -1.3782E35f)) { comp = (-1.4090E-41f - var_5 - (var_6 + var_7)); comp = -0.0f - var_8; comp += var_9 * asinf(-1.4665E17f); comp = (+0.0f + -1.0400E-35f - (var_10 * ceilf(expf(-1.9179E-35f)))); for (int i=0; i < var_3; ++i) { var_11[i] = (var_12 + (var_13 / +1.4589E-36f + (+1.9314E35f - (+1.0494E-43f * +1.7027E-37f)))); float tmp_1 = (var_14 + atanf(+1.3364E-8f)); float tmp_2 = -1.0532E-41f; comp += tmp_2 - tmp_1 * var_11[i] - (+1.0629E-9f * (+1.4833E-37f - +1.7569E10f)); } for (int i=0; i < var_4; ++i) { comp = -1.0301E-7f * -1.3249E23f * +1.4176E-35f; comp = (+1.6993E-37f * (+1.0260E-35f * var_15)); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); int tmp_4 = atoi(argv[4]); int tmp_5 = atoi(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float* tmp_12 = initPointer( atof(argv[12]) ); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16); hipDeviceSynchronize(); return 
0; }
7803f7b1c2e892372f329f4745ff809f5f774b6e.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15) { for (int i=0; i < var_1; ++i) { if (comp == var_2 * (+1.4715E-42f - -1.3782E35f)) { comp = (-1.4090E-41f - var_5 - (var_6 + var_7)); comp = -0.0f - var_8; comp += var_9 * asinf(-1.4665E17f); comp = (+0.0f + -1.0400E-35f - (var_10 * ceilf(expf(-1.9179E-35f)))); for (int i=0; i < var_3; ++i) { var_11[i] = (var_12 + (var_13 / +1.4589E-36f + (+1.9314E35f - (+1.0494E-43f * +1.7027E-37f)))); float tmp_1 = (var_14 + atanf(+1.3364E-8f)); float tmp_2 = -1.0532E-41f; comp += tmp_2 - tmp_1 * var_11[i] - (+1.0629E-9f * (+1.4833E-37f - +1.7569E10f)); } for (int i=0; i < var_4; ++i) { comp = -1.0301E-7f * -1.3249E23f * +1.4176E-35f; comp = (+1.6993E-37f * (+1.0260E-35f * var_15)); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); int tmp_4 = atoi(argv[4]); int tmp_5 = atoi(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float* tmp_12 = initPointer( atof(argv[12]) ); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16); cudaDeviceSynchronize(); return 0; }
a11ea8d8aa711c85b722069c0ff169ada711caec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <chrono> #include <iomanip> using namespace std; __global__ void convert(char *input, char *output, int len, int step) { int index = blockIdx.x * blockDim.x + threadIdx.x; //if (index < size) *(output + (2 - index % 3) * len + index / 3) = *(input+index); char * oR = output + index * step, *oG = oR + len, *oB = oG + len; const char *iPos = (const char *)input + index * step * 3; for(int i = 0; i < step; i += 1){ *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); } } extern "C" void rgbConvert(char* input, int size, unsigned char * pBuffer){ int len = size / 3; char * cinput = (char *) pBuffer; char * coutput = (char *) pBuffer + size; int block = 128; int thread = 128; hipMemcpy(pBuffer, input, size, hipMemcpyHostToDevice ); hipMemset(pBuffer + len * 6, 0, len); hipDeviceSynchronize(); if(len/block/thread > 0){ hipLaunchKernelGGL(( convert), dim3(len/block/thread), dim3(thread), 0, 0, cinput, coutput, len, block); hipLaunchKernelGGL(( convert), dim3(len%(block*thread)), dim3(1), 0, 0, cinput + len/block/thread*block*thread*3, coutput + len/block/thread*block*thread, len, 1); }else{ hipLaunchKernelGGL(( convert), dim3(len%(block*thread)), dim3(1), 0, 0, cinput , coutput , len, 1); } hipDeviceSynchronize(); } __global__ void convertBack(char *input, char *output, int len, int step) { int index = blockIdx.x * blockDim.x + threadIdx.x; //if (index < size) *(output + (2 - index % 3) * len + index / 3) = *(input+index); char * iR = input + index * step , *iG = iR + len, *iB = iG + len; char *oPos = output + index * step * 3; for (int i = 0; i < step; i++){ *(oPos++) = *(iB++); *(oPos++) = *(iG++); *(oPos++) = *(iR++); } } extern "C" void rgbConvertBack(int len, unsigned char * pBuffer){ int block = 128; int thread = 128; char * cinput = (char *) pBuffer; char * coutput = (char *) pBuffer + len * 4; if(len/block/thread > 0){ 
hipLaunchKernelGGL(( convertBack), dim3(len/block/thread), dim3(thread), 0, 0, cinput, coutput, len, thread); hipLaunchKernelGGL(( convertBack), dim3(len%(block*thread)), dim3(1), 0, 0, cinput + len/block/thread*block*thread, coutput + len/block/thread*block*thread*3, len, 1); }else{ hipLaunchKernelGGL(( convertBack), dim3(len%(block*thread)), dim3(1), 0, 0, cinput, coutput, len, 1); } hipDeviceSynchronize(); } __global__ void convert128(char *input, char *output, int len, int step) { int index = blockIdx.x * blockDim.x + threadIdx.x; //if (index < size) *(output + (2 - index % 3) * len + index / 3) = *(input+index); char * oR = output + index * step, *oG = oR + len, *oB = oG + len; const char *iPos = (const char *)input + index * step * 3; *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = 
*(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = 
*(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = 
*(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = 
*(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); *(oB++) = *(iPos++); *(oG++) = *(iPos++); *(oR++) = *(iPos++); } /* int main(){ int size = 1811520*3; char * input = (char*) malloc(size); memset(input, 2, size); char* output = rgbConvert(input, size); hipFree(output); return EXIT_SUCCESS; } */
a11ea8d8aa711c85b722069c0ff169ada711caec.cu
#include <stdio.h>
#include <iostream>
#include <chrono>
#include <iomanip>
using namespace std;

// Packed-to-planar pixel conversion.
// `input` holds interleaved 3-byte pixels (consumed in B,G,R order, per the
// pointer names below); `output` receives three consecutive planes of `len`
// bytes each, laid out plane0/plane1/plane2 = R/G/B.  Each thread converts
// `step` consecutive pixels.  There is NO bounds guard, so the launch
// configuration must cover exactly `len` pixels — the host wrappers below
// guarantee this by launching a bulk kernel plus a one-pixel-per-thread
// remainder kernel.
__global__ void convert(char *input, char *output, int len, int step) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    char *oR = output + index * step;  // plane 0
    char *oG = oR + len;               // plane 1
    char *oB = oG + len;               // plane 2
    const char *iPos = (const char *)input + index * step * 3;
    for (int i = 0; i < step; i += 1) {
        *(oB++) = *(iPos++);
        *(oG++) = *(iPos++);
        *(oR++) = *(iPos++);
    }
}

// Copies `size` bytes of packed pixel data from host `input` into the device
// buffer `pBuffer`, then de-interleaves into planar form starting at
// pBuffer + size.  `size` must be a multiple of 3 (len = size / 3 pixels) and
// pBuffer must hold at least 7 * len device bytes (input + 3 planes + the
// zeroed slot below).
// NOTE(review): no CUDA error checking is performed — failures are silent.
extern "C" void rgbConvert(char *input, int size, unsigned char *pBuffer) {
    int len = size / 3;
    char *cinput = (char *)pBuffer;
    char *coutput = (char *)pBuffer + size;
    int block = 128;   // pixels handled per thread by the bulk kernel
    int thread = 128;  // threads per block
    cudaMemcpy(pBuffer, input, size, cudaMemcpyHostToDevice);
    // Zero the `len` bytes just past the three output planes (presumably a
    // fourth plane expected by a downstream consumer — TODO confirm).
    cudaMemset(pBuffer + len * 6, 0, len);
    cudaDeviceSynchronize();
    if (len / block / thread > 0) {
        // Bulk kernel covers floor(len / (block*thread)) * block * thread
        // pixels; the remainder is converted one pixel per thread.
        convert<<<len / block / thread, thread>>>(cinput, coutput, len, block);
        convert<<<len % (block * thread), 1>>>(
            cinput + len / block / thread * block * thread * 3,
            coutput + len / block / thread * block * thread, len, 1);
    } else {
        convert<<<len % (block * thread), 1>>>(cinput, coutput, len, 1);
    }
    cudaDeviceSynchronize();
}

// Inverse of `convert`: reads three planes of `len` bytes each and writes
// packed 3-byte pixels in B,G,R order.  Same coverage contract as `convert`
// (no bounds guard; launch must cover exactly `len` pixels).
__global__ void convertBack(char *input, char *output, int len, int step) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    char *iR = input + index * step;
    char *iG = iR + len;
    char *iB = iG + len;
    char *oPos = output + index * step * 3;
    for (int i = 0; i < step; i++) {
        *(oPos++) = *(iB++);
        *(oPos++) = *(iG++);
        *(oPos++) = *(iR++);
    }
}

// Re-interleaves planar data at pBuffer into packed pixels starting at
// pBuffer + len * 4.  `len` is the per-plane byte count.
// NOTE(review): no CUDA error checking is performed — failures are silent.
extern "C" void rgbConvertBack(int len, unsigned char *pBuffer) {
    int block = 128;
    int thread = 128;
    char *cinput = (char *)pBuffer;
    char *coutput = (char *)pBuffer + len * 4;
    if (len / block / thread > 0) {
        convertBack<<<len / block / thread, thread>>>(cinput, coutput, len, thread);
        convertBack<<<len % (block * thread), 1>>>(
            cinput + len / block / thread * block * thread,
            coutput + len / block / thread * block * thread * 3, len, 1);
    } else {
        convertBack<<<len % (block * thread), 1>>>(cinput, coutput, len, 1);
    }
    cudaDeviceSynchronize();
}

// Same as `convert`, with the per-pixel loop fully unrolled for exactly 128
// pixels per thread.  The original spelled out all 128 copy triplets by hand;
// a #pragma unroll loop generates the same code and is maintainable.
// NOTE(review): `step` only affects base addressing — the copy count is fixed
// at 128, so callers must pass step == 128 for consistent addressing.
__global__ void convert128(char *input, char *output, int len, int step) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    char *oR = output + index * step;
    char *oG = oR + len;
    char *oB = oG + len;
    const char *iPos = (const char *)input + index * step * 3;
#pragma unroll
    for (int i = 0; i < 128; ++i) {
        *(oB++) = *(iPos++);
        *(oG++) = *(iPos++);
        *(oR++) = *(iPos++);
    }
}

/*
int main(){
    int size = 1811520*3;
    char * input = (char*) malloc(size);
    memset(input, 2, size);
    char* output = rgbConvert(input, size);
    cudaFree(output);
    return EXIT_SUCCESS;
}
*/
cef134023a5ad5bf57813bd769f00dc9eaca2f8b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
// This file is auto-generated. See "generate_kernels.py"
// Each kernel below is a thin dispatch wrapper around one template
// instantiation of AttentionBackwardKernel (per its name: f32, non-aligned,
// 64x64 tiles, k<=65536, dropout variant).  The preprocessor guards restrict
// the real body to the architecture window the instantiation targets; when
// compiled for any other arch the wrapper only prints a fatal diagnostic.

#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>

// Variant for the [sm50, sm70) architecture window.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 65536>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
  // Bail out early when advance_to_block() reports nothing for this block.
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 65536>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// Variant for the [sm70, sm75) architecture window.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 65536>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
  // Bail out early when advance_to_block() reports nothing for this block.
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 65536>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// Variant for the [sm75, sm80) architecture window.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 65536>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
  // Bail out early when advance_to_block() reports nothing for this block.
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 65536>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
cef134023a5ad5bf57813bd769f00dc9eaca2f8b.cu
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */
// This file is auto-generated. See "generate_kernels.py"
// Each kernel below is a thin dispatch wrapper around one template
// instantiation of AttentionBackwardKernel (per its name: f32, non-aligned,
// 64x64 tiles, k<=65536, dropout variant).  The preprocessor guards restrict
// the real body to the architecture window the instantiation targets; when
// compiled for any other arch the wrapper only prints a fatal diagnostic.

#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>

// Variant for the [sm50, sm70) architecture window.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 65536>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
  // Bail out early when advance_to_block() reports nothing for this block.
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 65536>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// Variant for the [sm70, sm75) architecture window.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 65536>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
  // Bail out early when advance_to_block() reports nothing for this block.
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 65536>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}

// Variant for the [sm75, sm80) architecture window.
__global__ void __launch_bounds__(
    AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 65536>::kNumThreads,
    AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
  // Bail out early when advance_to_block() reports nothing for this block.
  if (!p.advance_to_block()) {
    return;
  }
  AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 65536>::attention_kernel(p);
  return;
#endif
#endif
  printf(
      "FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
      int(__CUDA_ARCH__ + 0) / 10);
#endif
}
065157f78b5f73bb67628bf8e94470a5fde09e92.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <ctime>
#include "head.h"
using namespace std;

#define R

// Out-of-place transpose of a row-major `row` x `column` matrix.
// Each thread copies FOUR elements spaced blockDim.x apart along x (hence
// the `* 4` in the x-index), so the grid's x extent must be row /
// (blockDim.x * 4).  The `C` macro selects between the two symmetric access
// patterns (row-order reads vs. row-order writes).
template<typename T>
void __global__ transpose(const T* src, T *after, unsigned int row, unsigned int column)
{
    unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * 4;
    unsigned int idy = threadIdx.y + blockDim.y * blockIdx.y;
    // Guard covers all four strided copies performed by this thread.
    if(idx + blockDim.x * 3 < row && idy < column)
    {
#ifdef C
        // Read in row order, write in column order.
        unsigned int to = idx * column + idy;
        unsigned int fo = idy * row + idx;
        unsigned int step = blockDim.x * column;
        after[to] = src[fo];
        after[to + step] = src[fo + blockDim.x];
        after[to + step*2] = src[fo + blockDim.x*2];
        after[to + step*3] = src[fo + blockDim.x*3];
#else
        // Read in column order, write in row order.
        unsigned int to = idx * column + idy;
        unsigned int fo = idy * row + idx;
        unsigned int step = blockDim.x * column;
        after[fo] = src[to];
        after[fo + blockDim.x] = src[to + step];
        after[fo + blockDim.x*2] = src[to + step*2];
        after[fo + blockDim.x*3] = src[to + step*3];
#endif
    }
}

// CPU reference transpose: after[j * row + i] = src[i * column + j].
template<typename T>
void transposeHost(const T* src, T* after, unsigned int row, unsigned int column)
{
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < column; j++)
        {
            after[j * row + i] = src[i * column + j];
        }
    }
}

int main(int argc, char *argv[])
{
    int Row = 32;       // thread-block extent in x and y
    int Column = 32;
    int nx = 1 << 9;    // 512 x 512 matrix
    int ny = 1 << 9;
    int N = nx * ny;
    // NOTE(review): ~1 MiB per array on the stack; consider heap allocation
    // if N grows beyond the default stack limit.
    int a[N], b[N], *a_dev, *b_dev;
    clock_t start, end;
    for (int i = 0; i < N; i++)
    {
        a[i] = i;
    }
    start = clock();
    transposeHost(a, b, nx, ny);
    end = clock();
    cout << "cpu :" << end - start << "ms" << endl;
    dim3 block(Row, Column);
    // x-dimension divided by 4 because each thread copies four elements.
    dim3 grid((nx + Row*4 - 1)/Row/4, (Column + ny - 1)/Column);
    hipMalloc((void**)&a_dev, sizeof(a));
    hipMalloc((void**)&b_dev, sizeof(a));
    cout << "brea 1" << sizeof(a) << endl;
    hipMemcpy(a_dev, a, sizeof(a), hipMemcpyHostToDevice);
    start = clock();
    // BUG FIX: grid and block were passed in swapped order; hipLaunchKernelGGL
    // takes the grid dimension first, then the block dimension.
    hipLaunchKernelGGL((transpose), dim3(grid), dim3(block), 0, 0, a_dev, b_dev, nx, ny);
    hipDeviceSynchronize();
    end = clock();
    hipMemcpy(a, b_dev, sizeof(a), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    // Compare the GPU result (copied back into a) with the CPU reference in b.
    for (int i = 0; i < N; i++)
    {
        if (a[i] != b[i])
        {
            cout << "failed" << endl;
        }
    }
    cout << "gpu:" << end - start << "ms" << endl;
    // Release device buffers (previously leaked).
    hipFree(a_dev);
    hipFree(b_dev);
    return 0;
}
065157f78b5f73bb67628bf8e94470a5fde09e92.cu
#include <iostream>
#include <ctime>
#include "head.h"
using namespace std;

#define R

// Out-of-place transpose of a row-major `row` x `column` matrix.
// Each thread copies FOUR elements spaced blockDim.x apart along x (hence
// the `* 4` in the x-index), so the grid's x extent must be row /
// (blockDim.x * 4).  The `C` macro selects between the two symmetric access
// patterns (row-order reads vs. row-order writes).
template<typename T>
void __global__ transpose(const T* src, T *after, unsigned int row, unsigned int column)
{
    unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x * 4;
    unsigned int idy = threadIdx.y + blockDim.y * blockIdx.y;
    // Guard covers all four strided copies performed by this thread.
    if(idx + blockDim.x * 3 < row && idy < column)
    {
#ifdef C
        // Read in row order, write in column order.
        unsigned int to = idx * column + idy;
        unsigned int fo = idy * row + idx;
        unsigned int step = blockDim.x * column;
        after[to] = src[fo];
        after[to + step] = src[fo + blockDim.x];
        after[to + step*2] = src[fo + blockDim.x*2];
        after[to + step*3] = src[fo + blockDim.x*3];
#else
        // Read in column order, write in row order.
        unsigned int to = idx * column + idy;
        unsigned int fo = idy * row + idx;
        unsigned int step = blockDim.x * column;
        after[fo] = src[to];
        after[fo + blockDim.x] = src[to + step];
        after[fo + blockDim.x*2] = src[to + step*2];
        after[fo + blockDim.x*3] = src[to + step*3];
#endif
    }
}

// CPU reference transpose: after[j * row + i] = src[i * column + j].
template<typename T>
void transposeHost(const T* src, T* after, unsigned int row, unsigned int column)
{
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < column; j++)
        {
            after[j * row + i] = src[i * column + j];
        }
    }
}

int main(int argc, char *argv[])
{
    int Row = 32;       // thread-block extent in x and y
    int Column = 32;
    int nx = 1 << 9;    // 512 x 512 matrix
    int ny = 1 << 9;
    int N = nx * ny;
    // NOTE(review): ~1 MiB per array on the stack; consider heap allocation
    // if N grows beyond the default stack limit.
    int a[N], b[N], *a_dev, *b_dev;
    clock_t start, end;
    for (int i = 0; i < N; i++)
    {
        a[i] = i;
    }
    start = clock();
    transposeHost(a, b, nx, ny);
    end = clock();
    cout << "cpu :" << end - start << "ms" << endl;
    dim3 block(Row, Column);
    // x-dimension divided by 4 because each thread copies four elements.
    dim3 grid((nx + Row*4 - 1)/Row/4, (Column + ny - 1)/Column);
    cudaMalloc((void**)&a_dev, sizeof(a));
    cudaMalloc((void**)&b_dev, sizeof(a));
    cout << "brea 1" << sizeof(a) << endl;
    cudaMemcpy(a_dev, a, sizeof(a), cudaMemcpyHostToDevice);
    start = clock();
    // BUG FIX: launch configuration was <<<block, grid>>>; the execution
    // configuration takes the grid dimension first, then the block dimension.
    transpose<<<grid, block>>>(a_dev, b_dev, nx, ny);
    cudaDeviceSynchronize();
    end = clock();
    cudaMemcpy(a, b_dev, sizeof(a), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Compare the GPU result (copied back into a) with the CPU reference in b.
    for (int i = 0; i < N; i++)
    {
        if (a[i] != b[i])
        {
            cout << "failed" << endl;
        }
    }
    cout << "gpu:" << end - start << "ms" << endl;
    // Release device buffers (previously leaked).
    cudaFree(a_dev);
    cudaFree(b_dev);
    return 0;
}
c9644d0cb2f3f931f9193979ac3d7ef2e5e9b17f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <algorithm> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #include <omp.h> #ifdef HAVE_CUB #include <hipcub/hipcub.hpp> #endif // HAVE_CUB #ifdef USE_NVTX #include <roctracer/roctx.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE roctxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ hipError_t cudaStatus = call; \ if (hipSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \ } constexpr int MAX_NUM_DEVICES = 32; typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int offset, const int nx, const int my_ny, const int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[iy * nx + 0] = y0; a[iy * nx + (nx - 1)] = y0; a_new[iy * nx + 0] = y0; a_new[iy * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx, real* 
__restrict__ const a_new_top, const int top_iy, real* __restrict__ const a_new_bottom, const int bottom_iy) { #ifdef HAVE_CUB typedef hipcub::BlockReduce<real, BLOCK_DIM_X, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; real local_l2_norm = 0.0; if (iy < iy_end && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; if (iy_start == iy) { a_new_top[top_iy * nx + ix] = new_val; } if ((iy_end - 1) == iy) { a_new_bottom[bottom_iy * nx + ix] = new_val; } real residue = new_val - a[iy * nx + ix]; local_l2_norm += residue * residue; } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { hipEvent_t copy_done; real* d; real* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000); const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1); const int nx = get_argval<int>(argv, argv + argc, "-nx", 7168); const int ny = get_argval<int>(argv, argv + argc, "-ny", 
7168); const bool csv = get_arg(argv, argv + argc, "-csv"); real* a_new[MAX_NUM_DEVICES]; real* a_ref_h; real* a_h; double runtime_serial = 0.0; int iy_end[MAX_NUM_DEVICES]; hipEvent_t compute_done[2][MAX_NUM_DEVICES]; hipEvent_t reset_l2_norm_done[2][MAX_NUM_DEVICES]; bool result_correct = true; bool p2p_works = true; int num_devices = 0; CUDA_RT_CALL(hipGetDeviceCount(&num_devices)); real l2_norms[2]; #pragma omp parallel num_threads(num_devices) shared(l2_norms) { real* a; hipStream_t compute_stream; hipStream_t reset_l2_norm_stream; l2_norm_buf l2_norm_bufs[2]; // Ensure correctness if ny%size != 0 int chunk_size = ::ceil((1.0 * (ny - 2)) / num_devices); int dev_id = omp_get_thread_num(); CUDA_RT_CALL(hipSetDevice(dev_id)); CUDA_RT_CALL(hipSetDeviceFlags(hipDeviceScheduleSpin)); CUDA_RT_CALL(hipFree(0)); if (0 == dev_id) { CUDA_RT_CALL(hipHostMalloc(&a_ref_h, nx * ny * sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&a_h, nx * ny * sizeof(real))); runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv); } #pragma omp barrier const int top = dev_id > 0 ? 
dev_id - 1 : (num_devices - 1); const int bottom = (dev_id + 1) % num_devices; if (top != dev_id) { int canAccessPeer = 0; CUDA_RT_CALL(hipDeviceCanAccessPeer(&canAccessPeer, dev_id, top)); if (canAccessPeer) { CUDA_RT_CALL(hipDeviceEnablePeerAccess(top, 0)); } else { std::cerr << "P2P access required from " << dev_id << " to " << top << std::endl; #pragma omp critical { if (p2p_works) p2p_works = false; } } if (top != bottom) { canAccessPeer = 0; CUDA_RT_CALL(hipDeviceCanAccessPeer(&canAccessPeer, dev_id, bottom)); if (canAccessPeer) { CUDA_RT_CALL(hipDeviceEnablePeerAccess(bottom, 0)); } else { std::cerr << "P2P access required from " << dev_id << " to " << bottom << std::endl; #pragma omp critical { if (p2p_works) p2p_works = false; } } } } #pragma omp barrier if (p2p_works) { CUDA_RT_CALL(hipMalloc(&a, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(hipMalloc(a_new + dev_id, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(hipMemset(a, 0, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(hipMemset(a_new[dev_id], 0, nx * (chunk_size + 2) * sizeof(real))); // Calculate local domain boundaries int iy_start_global = dev_id * chunk_size + 1; int iy_end_global = iy_start_global + chunk_size - 1; // Do not process boundaries iy_end_global = ::min(iy_end_global, ny - 2); int iy_start = 1; iy_end[dev_id] = (iy_end_global - iy_start_global + 1) + iy_start; // Set diriclet boundary conditions on left and right boarder hipLaunchKernelGGL(( initialize_boundaries), dim3((ny / num_devices) / 128 + 1), dim3(128), 0, 0, a, a_new[dev_id], PI, iy_start_global - 1, nx, (chunk_size + 2), ny); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); CUDA_RT_CALL(hipStreamCreate(&compute_stream)); CUDA_RT_CALL(hipStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL( hipEventCreateWithFlags(compute_done[0] + dev_id, hipEventDisableTiming)); CUDA_RT_CALL( hipEventCreateWithFlags(compute_done[1] + dev_id, hipEventDisableTiming)); CUDA_RT_CALL( 
hipEventCreateWithFlags(reset_l2_norm_done[0] + dev_id, hipEventDisableTiming)); CUDA_RT_CALL( hipEventCreateWithFlags(reset_l2_norm_done[1] + dev_id, hipEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL( hipEventCreateWithFlags(&l2_norm_bufs[i].copy_done, hipEventDisableTiming)); CUDA_RT_CALL(hipMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(hipMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(hipDeviceSynchronize()); #pragma omp master { if (!csv) printf( "Jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); } constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + (num_devices * dim_block_y) - 1) / (num_devices * dim_block_y), 1); int iter = 0; #pragma omp master { for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } } CUDA_RT_CALL(hipDeviceSynchronize()); #pragma omp barrier double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; // need to wait for other threads due to sharing of a_new and compute_done // between threads #pragma omp barrier CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, compute_done[prev][top], 0)); CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, compute_done[prev][bottom], 0)); CUDA_RT_CALL( hipStreamWaitEvent(compute_stream, reset_l2_norm_done[curr][dev_id], 0)); hipLaunchKernelGGL(( jacobi_kernel<dim_block_x, dim_block_y>) , dim3(dim_grid), dim3({dim_block_x), dim_block_y, 1}, 0, compute_stream, a_new[dev_id], a, l2_norm_bufs[curr].d, iy_start, iy_end[dev_id], nx, a_new[top], iy_end[top], a_new[bottom], 0); CUDA_RT_CALL(hipGetLastError()); 
CUDA_RT_CALL(hipEventRecord(compute_done[curr][dev_id], compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), hipMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(hipEventRecord(l2_norm_bufs[curr].copy_done, compute_stream)); // ensure previous D2H-copy is completed before using the // data for calculation CUDA_RT_CALL(hipEventSynchronize(l2_norm_bufs[prev].copy_done)); /* * using atomics instead of critical sections caused a minimal (100ns / * iteration) performance gain */ #pragma omp atomic l2_norms[prev] += *(l2_norm_bufs[prev].h); #pragma omp barrier const real l2_norm_prev = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norm_prev > tol); if (!csv && (iter % 100) == 0) { #pragma omp single printf("%5d, %0.6f\n", iter, l2_norm_prev); } #pragma omp barrier // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[curr].h, sizeof(real), hipMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL( hipEventRecord(reset_l2_norm_done[prev][dev_id], reset_l2_norm_stream)); } else { #pragma omp barrier } std::swap(a_new[dev_id], a); iter++; } CUDA_RT_CALL(hipDeviceSynchronize()); #pragma omp barrier double stop = omp_get_wtime(); POP_RANGE CUDA_RT_CALL( hipMemcpy(a_h + iy_start_global * nx, a + nx, ::min((ny - iy_start_global) * nx, chunk_size * nx) * sizeof(real), hipMemcpyDeviceToHost)); #pragma omp barrier #pragma omp master { result_correct = true; for (int iy = 1; result_correct && (iy < (ny - 1)); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR: a[%d * %d + %d] = %f does not " "match %f (reference)\n", iy, nx, ix, a_h[iy * nx + ix], a_ref_h[iy * nx + ix]); 
result_correct = false; } } } if (result_correct) { if (csv) { printf( "multi_threaded_p2p_opt, %d, %d, %d, %d, %d, 1, " "%f, %f\n", nx, ny, iter_max, nccheck, num_devices, (stop - start), runtime_serial); } else { printf("Num GPUs: %d.\n", num_devices); printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: " "%8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, num_devices, (stop - start), runtime_serial / (stop - start), runtime_serial / (num_devices * (stop - start)) * 100); } } } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipHostFree(l2_norm_bufs[i].h)); CUDA_RT_CALL(hipFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(hipEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[1][dev_id])); CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[0][dev_id])); CUDA_RT_CALL(hipEventDestroy(compute_done[1][dev_id])); CUDA_RT_CALL(hipEventDestroy(compute_done[0][dev_id])); CUDA_RT_CALL(hipStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(compute_stream)); CUDA_RT_CALL(hipFree(a_new[dev_id])); CUDA_RT_CALL(hipFree(a)); if (0 == dev_id) { CUDA_RT_CALL(hipHostFree(a_h)); CUDA_RT_CALL(hipHostFree(a_ref_h)); } } CUDA_RT_CALL(hipDeviceReset()); } return result_correct ? 
0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print) { real* a; real* a_new; hipStream_t compute_stream; hipStream_t copy_l2_norm_stream; hipStream_t reset_l2_norm_stream; hipEvent_t compute_done; hipEvent_t reset_l2_norm_done[2]; real l2_norms[2]; l2_norm_buf l2_norm_bufs[2]; int iy_start = 1; int iy_end = (ny - 1); CUDA_RT_CALL(hipMalloc(&a, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMalloc(&a_new, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMemset(a, 0, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMemset(a_new, 0, nx * ny * sizeof(real))); // Set diriclet boundary conditions on left and right boarder hipLaunchKernelGGL(( initialize_boundaries), dim3(ny / 128 + 1), dim3(128), 0, 0, a, a_new, PI, 0, nx, ny, ny); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); CUDA_RT_CALL(hipStreamCreate(&compute_stream)); CUDA_RT_CALL(hipStreamCreate(&copy_l2_norm_stream)); CUDA_RT_CALL(hipStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done, hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[0], hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[1], hipEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipEventCreateWithFlags(&l2_norm_bufs[i].copy_done, hipEventDisableTiming)); CUDA_RT_CALL(hipMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(hipMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(hipDeviceSynchronize()); if (print) printf( "Single GPU Jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + dim_block_y - 1) / dim_block_y, 1); int iter = 0; for (int i = 0; i < 2; ++i) { 
l2_norms[i] = 1.0; } double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); hipLaunchKernelGGL(( jacobi_kernel<dim_block_x, dim_block_y>) , dim3(dim_grid), dim3({dim_block_x), dim_block_y, 1}, 0, compute_stream, a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx, a_new, iy_end, a_new, (iy_start - 1)); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipEventRecord(compute_done, compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (print && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(hipStreamWaitEvent(copy_l2_norm_stream, compute_done, 0)); CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), hipMemcpyDeviceToHost, copy_l2_norm_stream)); CUDA_RT_CALL(hipEventRecord(l2_norm_bufs[curr].copy_done, copy_l2_norm_stream)); // ensure previous D2H copy is completed before using the data for // calculation CUDA_RT_CALL(hipEventSynchronize(l2_norm_bufs[prev].copy_done)); l2_norms[prev] = *(l2_norm_bufs[prev].h); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); if (print && (iter % 100) == 0) { printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(real), hipMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL(hipEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(hipDeviceSynchronize()); POP_RANGE double stop = omp_get_wtime(); CUDA_RT_CALL(hipMemcpy(a_ref_h, a, nx * ny * 
sizeof(real), hipMemcpyDeviceToHost)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipHostFree(l2_norm_bufs[i].h)); CUDA_RT_CALL(hipFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(hipEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(hipEventDestroy(compute_done)); CUDA_RT_CALL(hipStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(copy_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(compute_stream)); CUDA_RT_CALL(hipFree(a_new)); CUDA_RT_CALL(hipFree(a)); return (stop - start); }
c9644d0cb2f3f931f9193979ac3d7ef2e5e9b17f.cu
/* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <algorithm> #include <cmath> #include <cstdio> #include <iostream> #include <sstream> #include <omp.h> #ifdef HAVE_CUB #include <cub/block/block_reduce.cuh> #endif // HAVE_CUB #ifdef USE_NVTX #include <nvToolsExt.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE nvtxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ cudaError_t cudaStatus = call; \ if (cudaSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } constexpr int MAX_NUM_DEVICES = 32; typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int offset, const int nx, const int my_ny, const int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < my_ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * (offset + iy) / (ny - 1)); a[iy * nx + 0] = y0; a[iy * nx + (nx - 1)] = y0; a_new[iy * nx + 0] = y0; a_new[iy * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx, 
real* __restrict__ const a_new_top, const int top_iy, real* __restrict__ const a_new_bottom, const int bottom_iy) { #ifdef HAVE_CUB typedef cub::BlockReduce<real, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB int iy = blockIdx.y * blockDim.y + threadIdx.y + iy_start; int ix = blockIdx.x * blockDim.x + threadIdx.x + 1; real local_l2_norm = 0.0; if (iy < iy_end && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; if (iy_start == iy) { a_new_top[top_iy * nx + ix] = new_val; } if ((iy_end - 1) == iy) { a_new_bottom[bottom_iy * nx + ix] = new_val; } real residue = new_val - a[iy * nx + ix]; local_l2_norm += residue * residue; } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { cudaEvent_t copy_done; real* d; real* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000); const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1); const int nx = get_argval<int>(argv, argv + argc, "-nx", 7168); const int ny = get_argval<int>(argv, argv + argc, "-ny", 
7168); const bool csv = get_arg(argv, argv + argc, "-csv"); real* a_new[MAX_NUM_DEVICES]; real* a_ref_h; real* a_h; double runtime_serial = 0.0; int iy_end[MAX_NUM_DEVICES]; cudaEvent_t compute_done[2][MAX_NUM_DEVICES]; cudaEvent_t reset_l2_norm_done[2][MAX_NUM_DEVICES]; bool result_correct = true; bool p2p_works = true; int num_devices = 0; CUDA_RT_CALL(cudaGetDeviceCount(&num_devices)); real l2_norms[2]; #pragma omp parallel num_threads(num_devices) shared(l2_norms) { real* a; cudaStream_t compute_stream; cudaStream_t reset_l2_norm_stream; l2_norm_buf l2_norm_bufs[2]; // Ensure correctness if ny%size != 0 int chunk_size = std::ceil((1.0 * (ny - 2)) / num_devices); int dev_id = omp_get_thread_num(); CUDA_RT_CALL(cudaSetDevice(dev_id)); CUDA_RT_CALL(cudaSetDeviceFlags(cudaDeviceScheduleSpin)); CUDA_RT_CALL(cudaFree(0)); if (0 == dev_id) { CUDA_RT_CALL(cudaMallocHost(&a_ref_h, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&a_h, nx * ny * sizeof(real))); runtime_serial = single_gpu(nx, ny, iter_max, a_ref_h, nccheck, !csv); } #pragma omp barrier const int top = dev_id > 0 ? 
dev_id - 1 : (num_devices - 1); const int bottom = (dev_id + 1) % num_devices; if (top != dev_id) { int canAccessPeer = 0; CUDA_RT_CALL(cudaDeviceCanAccessPeer(&canAccessPeer, dev_id, top)); if (canAccessPeer) { CUDA_RT_CALL(cudaDeviceEnablePeerAccess(top, 0)); } else { std::cerr << "P2P access required from " << dev_id << " to " << top << std::endl; #pragma omp critical { if (p2p_works) p2p_works = false; } } if (top != bottom) { canAccessPeer = 0; CUDA_RT_CALL(cudaDeviceCanAccessPeer(&canAccessPeer, dev_id, bottom)); if (canAccessPeer) { CUDA_RT_CALL(cudaDeviceEnablePeerAccess(bottom, 0)); } else { std::cerr << "P2P access required from " << dev_id << " to " << bottom << std::endl; #pragma omp critical { if (p2p_works) p2p_works = false; } } } } #pragma omp barrier if (p2p_works) { CUDA_RT_CALL(cudaMalloc(&a, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(cudaMalloc(a_new + dev_id, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(cudaMemset(a, 0, nx * (chunk_size + 2) * sizeof(real))); CUDA_RT_CALL(cudaMemset(a_new[dev_id], 0, nx * (chunk_size + 2) * sizeof(real))); // Calculate local domain boundaries int iy_start_global = dev_id * chunk_size + 1; int iy_end_global = iy_start_global + chunk_size - 1; // Do not process boundaries iy_end_global = std::min(iy_end_global, ny - 2); int iy_start = 1; iy_end[dev_id] = (iy_end_global - iy_start_global + 1) + iy_start; // Set diriclet boundary conditions on left and right boarder initialize_boundaries<<<(ny / num_devices) / 128 + 1, 128>>>( a, a_new[dev_id], PI, iy_start_global - 1, nx, (chunk_size + 2), ny); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaStreamCreate(&compute_stream)); CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL( cudaEventCreateWithFlags(compute_done[0] + dev_id, cudaEventDisableTiming)); CUDA_RT_CALL( cudaEventCreateWithFlags(compute_done[1] + dev_id, cudaEventDisableTiming)); CUDA_RT_CALL( 
cudaEventCreateWithFlags(reset_l2_norm_done[0] + dev_id, cudaEventDisableTiming)); CUDA_RT_CALL( cudaEventCreateWithFlags(reset_l2_norm_done[1] + dev_id, cudaEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL( cudaEventCreateWithFlags(&l2_norm_bufs[i].copy_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(cudaDeviceSynchronize()); #pragma omp master { if (!csv) printf( "Jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); } constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + (num_devices * dim_block_y) - 1) / (num_devices * dim_block_y), 1); int iter = 0; #pragma omp master { for (int i = 0; i < 2; ++i) { l2_norms[i] = 1.0; } } CUDA_RT_CALL(cudaDeviceSynchronize()); #pragma omp barrier double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; // need to wait for other threads due to sharing of a_new and compute_done // between threads #pragma omp barrier CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, compute_done[prev][top], 0)); CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, compute_done[prev][bottom], 0)); CUDA_RT_CALL( cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr][dev_id], 0)); jacobi_kernel<dim_block_x, dim_block_y> <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>( a_new[dev_id], a, l2_norm_bufs[curr].d, iy_start, iy_end[dev_id], nx, a_new[top], iy_end[top], a_new[bottom], 0); CUDA_RT_CALL(cudaGetLastError()); 
CUDA_RT_CALL(cudaEventRecord(compute_done[curr][dev_id], compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), cudaMemcpyDeviceToHost, compute_stream)); CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, compute_stream)); // ensure previous D2H-copy is completed before using the // data for calculation CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done)); /* * using atomics instead of critical sections caused a minimal (100ns / * iteration) performance gain */ #pragma omp atomic l2_norms[prev] += *(l2_norm_bufs[prev].h); #pragma omp barrier const real l2_norm_prev = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norm_prev > tol); if (!csv && (iter % 100) == 0) { #pragma omp single printf("%5d, %0.6f\n", iter, l2_norm_prev); } #pragma omp barrier // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[curr].h, sizeof(real), cudaMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL( cudaEventRecord(reset_l2_norm_done[prev][dev_id], reset_l2_norm_stream)); } else { #pragma omp barrier } std::swap(a_new[dev_id], a); iter++; } CUDA_RT_CALL(cudaDeviceSynchronize()); #pragma omp barrier double stop = omp_get_wtime(); POP_RANGE CUDA_RT_CALL( cudaMemcpy(a_h + iy_start_global * nx, a + nx, std::min((ny - iy_start_global) * nx, chunk_size * nx) * sizeof(real), cudaMemcpyDeviceToHost)); #pragma omp barrier #pragma omp master { result_correct = true; for (int iy = 1; result_correct && (iy < (ny - 1)); ++iy) { for (int ix = 1; result_correct && (ix < (nx - 1)); ++ix) { if (std::fabs(a_ref_h[iy * nx + ix] - a_h[iy * nx + ix]) > tol) { fprintf(stderr, "ERROR: a[%d * %d + %d] = %f does not " "match %f (reference)\n", iy, nx, ix, a_h[iy * nx + ix], 
a_ref_h[iy * nx + ix]); result_correct = false; } } } if (result_correct) { if (csv) { printf( "multi_threaded_p2p_opt, %d, %d, %d, %d, %d, 1, " "%f, %f\n", nx, ny, iter_max, nccheck, num_devices, (stop - start), runtime_serial); } else { printf("Num GPUs: %d.\n", num_devices); printf( "%dx%d: 1 GPU: %8.4f s, %d GPUs: %8.4f s, speedup: " "%8.2f, " "efficiency: %8.2f \n", ny, nx, runtime_serial, num_devices, (stop - start), runtime_serial / (stop - start), runtime_serial / (num_devices * (stop - start)) * 100); } } } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h)); CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1][dev_id])); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0][dev_id])); CUDA_RT_CALL(cudaEventDestroy(compute_done[1][dev_id])); CUDA_RT_CALL(cudaEventDestroy(compute_done[0][dev_id])); CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(compute_stream)); CUDA_RT_CALL(cudaFree(a_new[dev_id])); CUDA_RT_CALL(cudaFree(a)); if (0 == dev_id) { CUDA_RT_CALL(cudaFreeHost(a_h)); CUDA_RT_CALL(cudaFreeHost(a_ref_h)); } } CUDA_RT_CALL(cudaDeviceReset()); } return result_correct ? 
0 : 1; } double single_gpu(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print) { real* a; real* a_new; cudaStream_t compute_stream; cudaStream_t copy_l2_norm_stream; cudaStream_t reset_l2_norm_stream; cudaEvent_t compute_done; cudaEvent_t reset_l2_norm_done[2]; real l2_norms[2]; l2_norm_buf l2_norm_bufs[2]; int iy_start = 1; int iy_end = (ny - 1); CUDA_RT_CALL(cudaMalloc(&a, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMalloc(&a_new, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(real))); // Set diriclet boundary conditions on left and right boarder initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, 0, nx, ny, ny); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaStreamCreate(&compute_stream)); CUDA_RT_CALL(cudaStreamCreate(&copy_l2_norm_stream)); CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[0], cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[1], cudaEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaEventCreateWithFlags(&l2_norm_bufs[i].copy_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(real))); *(l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(cudaDeviceSynchronize()); if (print) printf( "Single GPU Jacobi relaxation: %d iterations on %d x %d mesh with " "norm " "check every %d iterations\n", iter_max, ny, nx, nccheck); constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + dim_block_y - 1) / dim_block_y, 1); int iter = 0; for (int i = 0; i < 2; ++i) { l2_norms[i] 
= 1.0; } double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); jacobi_kernel<dim_block_x, dim_block_y> <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>( a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx, a_new, iy_end, a_new, (iy_start - 1)); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaEventRecord(compute_done, compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (print && (iter % 100) == 0)) { // as soon as computation is complete -> D2H-copy L2 norm CUDA_RT_CALL(cudaStreamWaitEvent(copy_l2_norm_stream, compute_done, 0)); CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), cudaMemcpyDeviceToHost, copy_l2_norm_stream)); CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, copy_l2_norm_stream)); // ensure previous D2H copy is completed before using the data for // calculation CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done)); l2_norms[prev] = *(l2_norm_bufs[prev].h); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); if (print && (iter % 100) == 0) { printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[prev].d, l2_norm_bufs[prev].h, sizeof(real), cudaMemcpyHostToDevice, reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(cudaDeviceSynchronize()); POP_RANGE double stop = omp_get_wtime(); CUDA_RT_CALL(cudaMemcpy(a_ref_h, a, nx * ny * sizeof(real), cudaMemcpyDeviceToHost)); 
for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h)); CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(cudaEventDestroy(compute_done)); CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(copy_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(compute_stream)); CUDA_RT_CALL(cudaFree(a_new)); CUDA_RT_CALL(cudaFree(a)); return (stop - start); }
1e7eea7978a5b0e2446c75d47805a1b2093cce25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "roi_pooling_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void ROIPoolForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const float* bottom_rois, float* top_data, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int n = index; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1); int roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1); float bin_size_h = (float)(roi_height) / (float)(pooled_height); float bin_size_w = (float)(roi_width) / (float)(pooled_width); int hstart = (int)(floor((float)(ph) * bin_size_h)); int wstart = (int)(floor((float)(pw) * bin_size_w)); int hend = (int)(ceil((float)(ph + 1) * bin_size_h)); int wend = (int)(ceil((float)(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = fminf(fmaxf(hstart + roi_start_h, 0), height); hend = fminf(fmaxf(hend + roi_start_h, 0), height); wstart = fminf(fmaxf(wstart + roi_start_w, 0), width); wend = fminf(fmaxf(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero float maxval = 
is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += roi_batch_ind * channels * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { // int bottom_index = (h * width + w) * channels + c; int bottom_index = (c * height + h) * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; if (argmax_data != NULL) argmax_data[index] = maxidx; } } int ROIPoolForwardLaucher( const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const float* bottom_rois, float* top_data, int* argmax_data, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * pooled_height * pooled_width * channels; hipError_t err; hipLaunchKernelGGL(( ROIPoolForward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, output_size, bottom_data, spatial_scale, height, width, channels, pooled_height, pooled_width, bottom_rois, top_data, argmax_data); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ROIPoolBackward(const int nthreads, const float* top_diff, const int* argmax_data, const int num_rois, const float spatial_scale, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int n = index; int w = n % width; n /= width; int h = n % height; n /= height; int c = n % channels; n /= channels; float gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < 
num_rois; ++roi_n) { const float* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = roi_n * pooled_height * pooled_width * channels; const float* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1); int roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1); float bin_size_h = (float)(roi_height) / (float)(pooled_height); float bin_size_w = (float)(roi_width) / (float)(pooled_width); int phstart = floor((float)(h - roi_start_h) / bin_size_h); int phend = ceil((float)(h - roi_start_h + 1) / bin_size_h); int pwstart = floor((float)(w - roi_start_w) / bin_size_w); int pwend = ceil((float)(w - roi_start_w + 1) / bin_size_w); phstart = fminf(fmaxf(phstart, 0), pooled_height); phend = fminf(fmaxf(phend, 0), pooled_height); pwstart = fminf(fmaxf(pwstart, 0), pooled_width); pwend = fminf(fmaxf(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[(c * pooled_height + ph) * pooled_width + pw] == index) { gradient += offset_top_diff[(c * pooled_height + ph) * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } int ROIPoolBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, 
const int width, const int channels, const int pooled_height, const int pooled_width, const float* bottom_rois, float* bottom_diff, const int* argmax_data, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = batch_size * height * width * channels; hipError_t err; hipLaunchKernelGGL(( ROIPoolBackward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, output_size, top_diff, argmax_data, num_rois, spatial_scale, height, width, channels, pooled_height, pooled_width, bottom_diff, bottom_rois); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
1e7eea7978a5b0e2446c75d47805a1b2093cce25.cu
#ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "roi_pooling_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void ROIPoolForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const float* bottom_rois, float* top_data, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int n = index; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; int roi_start_w = round(bottom_rois[1] * spatial_scale); int roi_start_h = round(bottom_rois[2] * spatial_scale); int roi_end_w = round(bottom_rois[3] * spatial_scale); int roi_end_h = round(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1); int roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1); float bin_size_h = (float)(roi_height) / (float)(pooled_height); float bin_size_w = (float)(roi_width) / (float)(pooled_width); int hstart = (int)(floor((float)(ph) * bin_size_h)); int wstart = (int)(floor((float)(pw) * bin_size_w)); int hend = (int)(ceil((float)(ph + 1) * bin_size_h)); int wend = (int)(ceil((float)(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = fminf(fmaxf(hstart + roi_start_h, 0), height); hend = fminf(fmaxf(hend + roi_start_h, 0), height); wstart = fminf(fmaxf(wstart + roi_start_w, 0), width); wend = fminf(fmaxf(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero float maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += roi_batch_ind * channels * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { // int bottom_index = (h * width + w) * channels + c; int bottom_index = (c * height + h) * width + w; if (bottom_data[bottom_index] > maxval) { maxval = bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; if (argmax_data != NULL) argmax_data[index] = maxidx; } } int ROIPoolForwardLaucher( const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const float* bottom_rois, float* top_data, int* argmax_data, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * pooled_height * pooled_width * channels; cudaError_t err; ROIPoolForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, bottom_data, spatial_scale, height, width, channels, pooled_height, pooled_width, bottom_rois, top_data, argmax_data); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ROIPoolBackward(const int nthreads, const float* top_diff, const int* argmax_data, const int num_rois, const float spatial_scale, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int n = index; int w = n % width; n /= width; int h = n % height; n /= height; int c = n % channels; n /= channels; float gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const float* 
offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = roi_n * pooled_height * pooled_width * channels; const float* offset_top_diff = top_diff + offset; const int* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 int roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1); int roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1); float bin_size_h = (float)(roi_height) / (float)(pooled_height); float bin_size_w = (float)(roi_width) / (float)(pooled_width); int phstart = floor((float)(h - roi_start_h) / bin_size_h); int phend = ceil((float)(h - roi_start_h + 1) / bin_size_h); int pwstart = floor((float)(w - roi_start_w) / bin_size_w); int pwend = ceil((float)(w - roi_start_w + 1) / bin_size_w); phstart = fminf(fmaxf(phstart, 0), pooled_height); phend = fminf(fmaxf(phend, 0), pooled_height); pwstart = fminf(fmaxf(pwstart, 0), pooled_width); pwend = fminf(fmaxf(pwend, 0), pooled_width); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (offset_argmax_data[(c * pooled_height + ph) * pooled_width + pw] == index) { gradient += offset_top_diff[(c * pooled_height + ph) * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } int ROIPoolBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width, const int 
channels, const int pooled_height, const int pooled_width, const float* bottom_rois, float* bottom_diff, const int* argmax_data, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = batch_size * height * width * channels; cudaError_t err; ROIPoolBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, top_diff, argmax_data, num_rois, spatial_scale, height, width, channels, pooled_height, pooled_width, bottom_diff, bottom_rois); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
pinnedmem_transfer_test.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <string.h> #include <iostream> using namespace std; void warmUpGPU(); int main( int argc, char **argv ) { hipError_t error_code; int num_items = atoi( argv[ 1 ] ); int upper_bound = atoi( argv[ 2 ] ); warmUpGPU(); const int num_trials = 3; int outer_index = 0; while( num_items < upper_bound ) { for( outer_index = 0; outer_index < num_trials; outer_index++ ) { char *dev_A = NULL; char *host_A = NULL; error_code = hipHostMalloc( (char **) &host_A, sizeof( char ) * num_items ); if( error_code != hipSuccess ) { cout << "Error allocating on device" << endl; } int index = 0; for( index = 0; index < num_items - 1; index++ ) { host_A[ index ] = 'A'; } host_A[ num_items - 1 ] = '\0'; error_code = hipMalloc( (char **) &dev_A, sizeof( char ) * num_items ); if( error_code != hipSuccess ) { cout << "Error allocating on device" << endl; } error_code = hipMemcpy( dev_A, host_A, sizeof( char ) * num_items, hipMemcpyHostToDevice ); hipDeviceSynchronize(); hipHostFree( host_A ); hipFree( dev_A ); } num_items += 1; } return EXIT_SUCCESS; } __global__ void warmup( unsigned int *tmp ) { if( threadIdx.x == 0 ) { *tmp = 555; } return; } void warmUpGPU() { printf( "Warming up GPU for time trialing...\n" ); unsigned int *dev_tmp; unsigned int *tmp; hipError_t errCode = hipSuccess; tmp = (unsigned int *) malloc( sizeof( unsigned int ) ); errCode = hipMalloc( (unsigned int **) &dev_tmp, sizeof( unsigned int ) ); if( errCode != hipSuccess ) { cout << "Error: dev_tmp error with code " << errCode << endl; } hipLaunchKernelGGL(( warmup), dim3(1),dim3(256), 0, 0, dev_tmp); //copy data from device to host errCode=hipMemcpy( tmp, dev_tmp, sizeof(unsigned int), hipMemcpyDeviceToHost); if(errCode != hipSuccess) { cout << "Error: getting tmp result form GPU error with code " << errCode << endl; } hipDeviceSynchronize(); printf("tmp (changed to 555 on 
GPU): %d\n",*tmp); hipFree(dev_tmp); return; }
pinnedmem_transfer_test.cu
#include <stdio.h> #include <omp.h> #include <stdlib.h> #include <string.h> #include <iostream> using namespace std; void warmUpGPU(); int main( int argc, char **argv ) { cudaError_t error_code; int num_items = atoi( argv[ 1 ] ); int upper_bound = atoi( argv[ 2 ] ); warmUpGPU(); const int num_trials = 3; int outer_index = 0; while( num_items < upper_bound ) { for( outer_index = 0; outer_index < num_trials; outer_index++ ) { char *dev_A = NULL; char *host_A = NULL; error_code = cudaMallocHost( (char **) &host_A, sizeof( char ) * num_items ); if( error_code != cudaSuccess ) { cout << "Error allocating on device" << endl; } int index = 0; for( index = 0; index < num_items - 1; index++ ) { host_A[ index ] = 'A'; } host_A[ num_items - 1 ] = '\0'; error_code = cudaMalloc( (char **) &dev_A, sizeof( char ) * num_items ); if( error_code != cudaSuccess ) { cout << "Error allocating on device" << endl; } error_code = cudaMemcpy( dev_A, host_A, sizeof( char ) * num_items, cudaMemcpyHostToDevice ); cudaDeviceSynchronize(); cudaFreeHost( host_A ); cudaFree( dev_A ); } num_items += 1; } return EXIT_SUCCESS; } __global__ void warmup( unsigned int *tmp ) { if( threadIdx.x == 0 ) { *tmp = 555; } return; } void warmUpGPU() { printf( "Warming up GPU for time trialing...\n" ); unsigned int *dev_tmp; unsigned int *tmp; cudaError_t errCode = cudaSuccess; tmp = (unsigned int *) malloc( sizeof( unsigned int ) ); errCode = cudaMalloc( (unsigned int **) &dev_tmp, sizeof( unsigned int ) ); if( errCode != cudaSuccess ) { cout << "Error: dev_tmp error with code " << errCode << endl; } warmup<<<1,256>>>(dev_tmp); //copy data from device to host errCode=cudaMemcpy( tmp, dev_tmp, sizeof(unsigned int), cudaMemcpyDeviceToHost); if(errCode != cudaSuccess) { cout << "Error: getting tmp result form GPU error with code " << errCode << endl; } cudaDeviceSynchronize(); printf("tmp (changed to 555 on GPU): %d\n",*tmp); cudaFree(dev_tmp); return; }
74b74324b40485abfa23a25c08fc2310785e72f9.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include "paddle/fluid/operators/seed_op.h" namespace paddle { namespace operators { template <typename Place, typename T> class GPUSeedKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* out = context.Output<Tensor>("Out"); auto* out_data = out->mutable_data<T>(context.GetPlace()); int user_seed = context.Attr<int>("seed"); std::random_device rnd; int seed; if (user_seed != 0) { seed = user_seed; } else { seed = rnd(); } auto target_gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace()); auto stream = context.cuda_device_context().stream(); memory::Copy(target_gpu_place, out_data, platform::CPUPlace(), &seed, sizeof(int), stream); } }; } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( seed, paddle::operators::GPUSeedKernel<paddle::platform::CUDADeviceContext, int>);
74b74324b40485abfa23a25c08fc2310785e72f9.cu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda.h> #include "paddle/fluid/operators/seed_op.h" namespace paddle { namespace operators { template <typename Place, typename T> class GPUSeedKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* out = context.Output<Tensor>("Out"); auto* out_data = out->mutable_data<T>(context.GetPlace()); int user_seed = context.Attr<int>("seed"); std::random_device rnd; int seed; if (user_seed != 0) { seed = user_seed; } else { seed = rnd(); } auto target_gpu_place = boost::get<platform::CUDAPlace>(context.GetPlace()); auto stream = context.cuda_device_context().stream(); memory::Copy(target_gpu_place, out_data, platform::CPUPlace(), &seed, sizeof(int), stream); } }; } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( seed, paddle::operators::GPUSeedKernel<paddle::platform::CUDADeviceContext, int>);
c14ea4385b715e142ca25968c1f1bf33afbc93a7.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <hip/hip_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=1; unsigned Value2=A[i]; unsigned Value3=B[i]; unsigned Value; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%32)<=31){ for(unsigned k=0; k<iterations;k++) { Value2= I1*Value1; Value3=I2*Value3; Value1*=Value2; Value3*=Value1; Value2*=Value3; Value1*=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int 
main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after\n"); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device 
memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
c14ea4385b715e142ca25968c1f1bf33afbc93a7.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <cuda_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=1; unsigned Value2=A[i]; unsigned Value3=B[i]; unsigned Value; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%32)<=31){ for(unsigned k=0; k<iterations;k++) { Value2= I1*Value1; Value3=I2*Value3; Value1*=Value2; Value3*=Value1; Value2*=Value3; Value1*=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 
2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after\n"); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) 
cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
157eeb38e1fcf935ff7e6ee6248b4f8b4541bfc8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" static char help[] = "Benchmarking CUDA kernel launch time\n"; /* Running example on Summit at OLCF: # run with total 1 resource set (RS) (-n1), 1 RS per node (-r1), 1 MPI rank (-a1), 7 cores (-c7) and 1 GPU (-g1) per RS $ jsrun -n1 -a1 -c7 -g1 -r1 ./ex1cu Average asynchronous CUDA kernel launch time = 4.86 microseconds Average synchronous CUDA kernel launch time = 12.83 microseconds */ #include <petscsys.h> #include <petscdevice_cuda.h> __global__ void NullKernel() { } int main(int argc, char **argv) { PetscInt i, n = 100000; PetscLogDouble tstart, tend, time; PetscFunctionBeginUser; PetscCall(PetscInitialize(&argc, &argv, (char *)0, help)); PetscCall(PetscOptionsGetInt(NULL, NULL, "-n", &n, NULL)); PetscCallCUDA(hipStreamSynchronize(NULL)); /* Initialize CUDA runtime to get more accurate timing below */ /* Launch a sequence of kernels asynchronously. Previous launched kernels do not need to be completed before launching a new one */ PetscCall(PetscTime(&tstart)); for (i = 0; i < n; i++)hipLaunchKernelGGL(( NullKernel), dim3(1), dim3(1), 0, NULL, ); PetscCall(PetscTime(&tend)); PetscCallCUDA(hipStreamSynchronize(NULL)); /* Sync after tend since we don't want to count kernel execution time */ time = (tend - tstart) * 1e6 / n; PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Average asynchronous CUDA kernel launch time = %.2f microseconds\n", time)); /* Launch a sequence of kernels synchronously. 
Only launch a new kernel after the one before it has been completed */ PetscCall(PetscTime(&tstart)); for (i = 0; i < n; i++) { hipLaunchKernelGGL(( NullKernel), dim3(1), dim3(1), 0, NULL, ); PetscCallCUDA(hipStreamSynchronize(NULL)); } PetscCall(PetscTime(&tend)); time = (tend - tstart) * 1e6 / n; PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Average synchronous CUDA kernel launch time = %.2f microseconds\n", time)); PetscCall(PetscFinalize()); return 0; } /*TEST build: requires: cuda test: requires: cuda args: -n 2 output_file: output/empty.out filter: grep "DOES_NOT_EXIST" TEST*/
157eeb38e1fcf935ff7e6ee6248b4f8b4541bfc8.cu
static char help[] = "Benchmarking CUDA kernel launch time\n"; /* Running example on Summit at OLCF: # run with total 1 resource set (RS) (-n1), 1 RS per node (-r1), 1 MPI rank (-a1), 7 cores (-c7) and 1 GPU (-g1) per RS $ jsrun -n1 -a1 -c7 -g1 -r1 ./ex1cu Average asynchronous CUDA kernel launch time = 4.86 microseconds Average synchronous CUDA kernel launch time = 12.83 microseconds */ #include <petscsys.h> #include <petscdevice_cuda.h> __global__ void NullKernel() { } int main(int argc, char **argv) { PetscInt i, n = 100000; PetscLogDouble tstart, tend, time; PetscFunctionBeginUser; PetscCall(PetscInitialize(&argc, &argv, (char *)0, help)); PetscCall(PetscOptionsGetInt(NULL, NULL, "-n", &n, NULL)); PetscCallCUDA(cudaStreamSynchronize(NULL)); /* Initialize CUDA runtime to get more accurate timing below */ /* Launch a sequence of kernels asynchronously. Previous launched kernels do not need to be completed before launching a new one */ PetscCall(PetscTime(&tstart)); for (i = 0; i < n; i++) NullKernel<<<1, 1, 0, NULL>>>(); PetscCall(PetscTime(&tend)); PetscCallCUDA(cudaStreamSynchronize(NULL)); /* Sync after tend since we don't want to count kernel execution time */ time = (tend - tstart) * 1e6 / n; PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Average asynchronous CUDA kernel launch time = %.2f microseconds\n", time)); /* Launch a sequence of kernels synchronously. Only launch a new kernel after the one before it has been completed */ PetscCall(PetscTime(&tstart)); for (i = 0; i < n; i++) { NullKernel<<<1, 1, 0, NULL>>>(); PetscCallCUDA(cudaStreamSynchronize(NULL)); } PetscCall(PetscTime(&tend)); time = (tend - tstart) * 1e6 / n; PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Average synchronous CUDA kernel launch time = %.2f microseconds\n", time)); PetscCall(PetscFinalize()); return 0; } /*TEST build: requires: cuda test: requires: cuda args: -n 2 output_file: output/empty.out filter: grep "DOES_NOT_EXIST" TEST*/
727bdce8f094b75121ff722768685c013aa7afdc.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_subf.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t n = XSIZE*YSIZE; float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float *y = NULL; hipMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_subf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_subf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_subf), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
727bdce8f094b75121ff722768685c013aa7afdc.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_subf.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t n = XSIZE*YSIZE; float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_subf<<<gridBlock,threadBlock>>>(n,result,x,y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_subf<<<gridBlock,threadBlock>>>(n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_subf<<<gridBlock,threadBlock>>>(n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e72c14def9c31879d16ed439a6221d77afaa31d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * map_vector_cuda_funcs_tests.cu * * Created on: Aug 17, 2018 * Author: i-bird */ #define BOOST_GPU_ENABLED __host__ __device__ #include "util/cudify/cudify.hpp" #include "config.h" #define BOOST_TEST_DYN_LINK #include <boost/test/unit_test.hpp> #include "util/cuda_util.hpp" #include "Vector/map_vector.hpp" #include "util/tokernel_transformation.hpp" BOOST_AUTO_TEST_SUITE( vector_cuda_funcs_tests ) BOOST_AUTO_TEST_CASE( vector_cuda_funcs_add_prp_device ) { openfpm::vector_gpu<aggregate<float,float[3],float[3][3]>> vg_data; openfpm::vector_gpu<aggregate<float,float[3],float[3][3]>> vg_data2; vg_data.resize(100); vg_data2.resize(100); // we fill vg_data with something for (size_t i = 0 ; i < 100 ; i++) { vg_data.template get<0>(i) = 2.5 + i; vg_data.template get<1>(i)[0] = 4.6 + i; vg_data.template get<1>(i)[1] = 7.8 + i; vg_data.template get<1>(i)[2] = 9.0 + i; vg_data2.template get<0>(i) = 8.5 + i; vg_data2.template get<1>(i)[0] = 1.6 + i; vg_data2.template get<1>(i)[1] = 3.8 + i; vg_data2.template get<1>(i)[2] = 5.1 + i; } vg_data.hostToDevice<0,1>(); vg_data2.hostToDevice<0,1>(); vg_data.add_prp_device<aggregate<float,float[3],float[3][3]>, CudaMemory, openfpm::grow_policy_double, OPENFPM_NATIVE, memory_traits_inte, 0,1>(vg_data2); vg_data.deviceToHost<0,1>(); BOOST_REQUIRE_EQUAL(vg_data.size(),200); bool match = true; for (unsigned int i = 100 ; i < 200 ; i++) { match &= vg_data.template get<0>(i) == vg_data2.template get<0>(i-100); match &= vg_data.template get<1>(i)[0] == vg_data2.template get<1>(i-100)[0]; match &= vg_data.template get<1>(i)[1] == vg_data2.template get<1>(i-100)[1]; match &= vg_data.template get<1>(i)[2] == vg_data2.template get<1>(i-100)[2]; } BOOST_REQUIRE_EQUAL(match,true); } BOOST_AUTO_TEST_CASE( vector_cuda_to_kernel_recursive2 ) { typedef openfpm::vector_gpu<aggregate<int,long int>> test1_type; typedef 
openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<long int>>>> test2_type; typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<Box<2,float>>>>> test3_type; typedef openfpm::vector<Box<3,float>,CudaMemory,memory_traits_inte> test4_type; typedef typename toKernel_transform<memory_traits_inte,test1_type>::type tker1; typedef typename toKernel_transform<memory_traits_inte,test2_type>::type tker2; typedef typename toKernel_transform<memory_traits_inte,test3_type>::type tker3; typedef typename toKernel_transform<memory_traits_inte,test4_type>::type tker4; bool test = std::is_same<tker1,openfpm::vector_gpu_ker<aggregate<int, long>, memory_traits_inte>>::value; BOOST_REQUIRE_EQUAL(test,true); test = std::is_same<tker2,openfpm::vector_gpu_ker<aggregate<int, openfpm::vector_gpu_ker<aggregate<long>, memory_traits_inte> >, memory_traits_inte>>::value; BOOST_REQUIRE_EQUAL(test,true); test = std::is_same<tker3,openfpm::vector_gpu_ker<aggregate<int, openfpm::vector_gpu_ker<aggregate<Box<2,float>>, memory_traits_inte> >, memory_traits_inte>>::value; BOOST_REQUIRE_EQUAL(test,true); test = std::is_same<tker4,openfpm::vector_gpu_ker<Box<3,float>,memory_traits_inte>>::value; BOOST_REQUIRE_EQUAL(test,true); } template<typename vv_rc,typename vector_output_type> __global__ void kernel_recursive_check(vv_rc vvrc, vector_output_type vot) { int k = 0; for (int i = 0 ; i < vvrc.size() ; i++) { for (int j = 0 ; j < vvrc.template get<1>(i).size() ; j++) { vot.template get<0>(k) = vvrc.template get<1>(i).template get<0>(j); k++; } } } BOOST_AUTO_TEST_CASE( vector_cuda_to_kernel_recursive2_test_toKernel ) { typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<long int>>>> test2_type; typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<Box<2,float>>>>> test3_type; test2_type tt2; test3_type tt3; tt2.add_no_device(); tt2.add_no_device(); tt2.add_no_device(); /* tt3.add(); tt3.add(); tt3.add();*/ tt2.template get<0>(0) = 80; 
tt2.template get<1>(0).add(); tt2.template get<1>(0).template get<0>(0) = 500; tt2.template get<0>(0) = 180; tt2.template get<1>(0).add(); tt2.template get<1>(0).template get<0>(1) = 600; tt2.template get<0>(0) = 280;; tt2.template get<1>(0).add(); tt2.template get<1>(0).template get<0>(2) = 700; tt2.template get<1>(0).template hostToDevice<0>(); tt2.template get<0>(1) = 10080; tt2.template get<1>(1).add(); tt2.template get<1>(1).template get<0>(0) = 1500; tt2.template get<0>(1) = 20080; tt2.template get<1>(1).add(); tt2.template get<1>(1).template get<0>(1) = 1600; tt2.template get<0>(1) = 30080; tt2.template get<1>(1).add(); tt2.template get<1>(1).template get<0>(2) = 1700; tt2.template get<1>(1).template hostToDevice<0>(); tt2.template get<0>(2) = 40080; tt2.template get<1>(2).add(); tt2.template get<1>(2).template get<0>(0) = 2500; tt2.template get<0>(2) = 50080; tt2.template get<1>(2).add(); tt2.template get<1>(2).template get<0>(1) = 2600; tt2.template get<0>(2) = 60080; tt2.template get<1>(2).add(); tt2.template get<1>(2).template get<0>(2) = 2700; tt2.template get<1>(2).template hostToDevice<0>(); tt2.template hostToDevice<1>(); openfpm::vector_gpu<aggregate<long int>> vg; vg.resize(9); CUDA_LAUNCH_DIM3(kernel_recursive_check,1,1,tt2.toKernel(),vg.toKernel()); vg.template deviceToHost<0>(); BOOST_REQUIRE_EQUAL(vg.template get<0>(0),500); BOOST_REQUIRE_EQUAL(vg.template get<0>(1),600); BOOST_REQUIRE_EQUAL(vg.template get<0>(2),700); BOOST_REQUIRE_EQUAL(vg.template get<0>(3),1500); BOOST_REQUIRE_EQUAL(vg.template get<0>(4),1600); BOOST_REQUIRE_EQUAL(vg.template get<0>(5),1700); BOOST_REQUIRE_EQUAL(vg.template get<0>(6),2500); BOOST_REQUIRE_EQUAL(vg.template get<0>(7),2600); BOOST_REQUIRE_EQUAL(vg.template get<0>(8),2700); } BOOST_AUTO_TEST_CASE( vector_cuda_to_cpu_operator_equal ) { openfpm::vector_gpu<aggregate<int,int,double>> v1; openfpm::vector<aggregate<int,int,double>> v2; openfpm::vector<aggregate<int,int,double>,HeapMemory, memory_traits_inte > v3; 
openfpm::vector<aggregate<int,int,double>> v4; v2.resize(3000); for (size_t i = 0 ; i < 3000 ; i++) { v2.template get<0>(i) = i; v2.template get<1>(i) = i+300; v2.template get<2>(i) = i+6123.0; } v1 = v2; v3 = v2; v4 = v1; for (size_t i = 0 ; i < v2.size() ; i++) { BOOST_REQUIRE_EQUAL(v2.template get<0>(i),v1.template get<0>(i)); BOOST_REQUIRE_EQUAL(v2.template get<0>(i),v3.template get<0>(i)); BOOST_REQUIRE_EQUAL(v2.template get<0>(i),v4.template get<0>(i)); BOOST_REQUIRE_EQUAL(v2.template get<1>(i),v1.template get<1>(i)); BOOST_REQUIRE_EQUAL(v2.template get<1>(i),v3.template get<1>(i)); BOOST_REQUIRE_EQUAL(v2.template get<1>(i),v4.template get<1>(i)); BOOST_REQUIRE_EQUAL(v2.template get<2>(i),v1.template get<2>(i)); BOOST_REQUIRE_EQUAL(v2.template get<2>(i),v3.template get<2>(i)); BOOST_REQUIRE_EQUAL(v2.template get<2>(i),v4.template get<2>(i)); } } BOOST_AUTO_TEST_CASE( vector_cuda_host_to_device_check ) { openfpm::vector_gpu<aggregate<int,int,double>> v1; v1.resize(3); for (size_t i = 0 ; i < v1.size() ; i++) { v1.template get<0>(i) = i; v1.template get<1>(i) = i+300; v1.template get<2>(i) = i+6123.0; } v1.hostToDevice<0,1,2>(); // Now we reset the element 0, 1 for (size_t i = 0 ; i < v1.size()-1 ; i++) { v1.template get<0>(i) = 0; v1.template get<1>(i) = 0; v1.template get<2>(i) = 0; } v1.hostToDevice<0,1,2>(v1.size()-1,v1.size()-1); v1.deviceToHost<0,1,2>(); for (size_t i = 0 ; i < v1.size() ; i++) { BOOST_REQUIRE_EQUAL(v1.template get<0>(i),i); BOOST_REQUIRE_EQUAL(v1.template get<1>(i),i+300); BOOST_REQUIRE_EQUAL(v1.template get<2>(i),i+6123.0); } } BOOST_AUTO_TEST_CASE( vector_cuda_host_to_device_vector_and_point_tensor ) { openfpm::vector_gpu<aggregate<float[3],float[3][3]>> v1; v1.resize(100); for (size_t i = 0 ; i < 50 ; i++) { v1.template get<0>(i)[0] = i+1500; v1.template get<0>(i)[1] = i+2200; v1.template get<0>(i)[2] = i+2600; v1.template get<1>(i)[0][0] = i+6000; v1.template get<1>(i)[0][1] = i+7200; v1.template get<1>(i)[0][2] = i+8600; v1.template 
get<1>(i)[1][0] = i+9000; v1.template get<1>(i)[1][1] = i+10200; v1.template get<1>(i)[1][2] = i+11600; v1.template get<1>(i)[2][0] = i+12800; v1.template get<1>(i)[2][1] = i+22200; v1.template get<1>(i)[2][2] = i+23600; } v1.hostToDevice<0,1>(0,50); for (size_t i = 50 ; i < 100 ; i++) { v1.template get<0>(i)[0] = i+1500; v1.template get<0>(i)[1] = i+2200; v1.template get<0>(i)[2] = i+2600; v1.template get<1>(i)[0][0] = i+6000; v1.template get<1>(i)[0][1] = i+7200; v1.template get<1>(i)[0][2] = i+8600; v1.template get<1>(i)[1][0] = i+9000; v1.template get<1>(i)[1][1] = i+10200; v1.template get<1>(i)[1][2] = i+11600; v1.template get<1>(i)[2][0] = i+12800; v1.template get<1>(i)[2][1] = i+22200; v1.template get<1>(i)[2][2] = i+23600; } v1.hostToDevice<0,1>(50,99); v1.deviceToHost<0,1>(); for (size_t i = 0 ; i < 100 ; i++) { BOOST_REQUIRE_EQUAL(v1.template get<0>(i)[0],i+1500); BOOST_REQUIRE_EQUAL(v1.template get<0>(i)[1],i+2200); BOOST_REQUIRE_EQUAL(v1.template get<0>(i)[2],i+2600); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[0][0],i+6000); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[0][1],i+7200); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[0][2],i+8600); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[1][0],i+9000); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[1][1],i+10200); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[1][2],i+11600); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[2][0],i+12800); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[2][1],i+22200); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[2][2],i+23600); } } BOOST_AUTO_TEST_CASE( vector_cuda_copy ) { openfpm::vector_gpu<aggregate<float,float[3],float[3][3]>> v1; openfpm::vector_gpu<aggregate<float,float[3],float[3][3]>> v2; v1.resize(100); auto ite = v1.getIterator(); while (ite.isNext()) { auto p = ite.get(); v1.template get<0>(p) = p + 100; v1.template get<0>(p) = p + 2000; v1.template get<0>(p) = p + 3000; v1.template get<0>(p) = p + 4000; v1.template get<1>(p)[0] = p + 5000; v1.template get<1>(p)[1] = p + 6000; v1.template 
get<1>(p)[2] = p + 7000; v1.template get<2>(p)[0][0] = p + 8000; v1.template get<2>(p)[0][1] = p + 9000; v1.template get<2>(p)[0][2] = p + 10000; v1.template get<2>(p)[1][0] = p + 11000; v1.template get<2>(p)[1][1] = p + 12000; v1.template get<2>(p)[2][2] = p + 13000; v1.template get<2>(p)[2][0] = p + 14000; v1.template get<2>(p)[2][1] = p + 15000; v1.template get<2>(p)[2][2] = p + 16000; ++ite; } v1.hostToDevice<0,1,2>(); ite = v1.getIterator(); while (ite.isNext()) { auto p = ite.get(); v1.template get<0>(p) = p + 6100; v1.template get<0>(p) = p + 62000; v1.template get<0>(p) = p + 63000; v1.template get<0>(p) = p + 64000; v1.template get<1>(p)[0] = p + 65000; v1.template get<1>(p)[1] = p + 66000; v1.template get<1>(p)[2] = p + 67000; v1.template get<2>(p)[0][0] = p + 68000; v1.template get<2>(p)[0][1] = p + 69000; v1.template get<2>(p)[0][2] = p + 610000; v1.template get<2>(p)[1][0] = p + 611000; v1.template get<2>(p)[1][1] = p + 612000; v1.template get<2>(p)[2][2] = p + 613000; v1.template get<2>(p)[2][0] = p + 614000; v1.template get<2>(p)[2][1] = p + 615000; v1.template get<2>(p)[2][2] = p + 616000; ++ite; } v2 = v1; // first check the CPU bool match = true; ite = v2.getIterator(); while (ite.isNext()) { auto p = ite.get(); match = v2.template get<0>(p) == p + 6100; match = v2.template get<0>(p) == p + 62000; match = v2.template get<0>(p) == p + 63000; match = v2.template get<0>(p) == p + 64000; match = v2.template get<1>(p)[0] == p + 65000; match = v2.template get<1>(p)[1] == p + 66000; match = v2.template get<1>(p)[2] == p + 67000; match = v2.template get<2>(p)[0][0] == p + 68000; match = v2.template get<2>(p)[0][1] == p + 69000; match = v2.template get<2>(p)[0][2] == p + 610000; match = v2.template get<2>(p)[1][0] == p + 611000; match = v2.template get<2>(p)[1][1] == p + 612000; match = v2.template get<2>(p)[2][2] == p + 613000; match = v2.template get<2>(p)[2][0] == p + 614000; match = v2.template get<2>(p)[2][1] == p + 615000; match = v2.template 
get<2>(p)[2][2] == p + 616000; ++ite; } BOOST_REQUIRE_EQUAL(match,true); v2.deviceToHost<0,1,2>(); ite = v2.getIterator(); while (ite.isNext()) { auto p = ite.get(); match = v2.template get<0>(p) == p + 100; match = v2.template get<0>(p) == p + 2000; match = v2.template get<0>(p) == p + 3000; match = v2.template get<0>(p) == p + 4000; match = v2.template get<1>(p)[0] == p + 5000; match = v2.template get<1>(p)[1] == p + 6000; match = v2.template get<1>(p)[2] == p + 7000; match = v2.template get<2>(p)[0][0] == p + 8000; match = v2.template get<2>(p)[0][1] == p + 9000; match = v2.template get<2>(p)[0][2] == p + 10000; match = v2.template get<2>(p)[1][0] == p + 11000; match = v2.template get<2>(p)[1][1] == p + 12000; match = v2.template get<2>(p)[2][2] == p + 13000; match = v2.template get<2>(p)[2][0] == p + 14000; match = v2.template get<2>(p)[2][1] == p + 15000; match = v2.template get<2>(p)[2][2] == p + 16000; if (match == false) { std::cout << v2.template get<0>(p) << std::endl; } ++ite; } BOOST_REQUIRE_EQUAL(match,true); } BOOST_AUTO_TEST_SUITE_END()
e72c14def9c31879d16ed439a6221d77afaa31d2.cu
/* * map_vector_cuda_funcs_tests.cu * * Created on: Aug 17, 2018 * Author: i-bird */ #define BOOST_GPU_ENABLED __host__ __device__ #include "util/cudify/cudify.hpp" #include "config.h" #define BOOST_TEST_DYN_LINK #include <boost/test/unit_test.hpp> #include "util/cuda_util.hpp" #include "Vector/map_vector.hpp" #include "util/tokernel_transformation.hpp" BOOST_AUTO_TEST_SUITE( vector_cuda_funcs_tests ) BOOST_AUTO_TEST_CASE( vector_cuda_funcs_add_prp_device ) { openfpm::vector_gpu<aggregate<float,float[3],float[3][3]>> vg_data; openfpm::vector_gpu<aggregate<float,float[3],float[3][3]>> vg_data2; vg_data.resize(100); vg_data2.resize(100); // we fill vg_data with something for (size_t i = 0 ; i < 100 ; i++) { vg_data.template get<0>(i) = 2.5 + i; vg_data.template get<1>(i)[0] = 4.6 + i; vg_data.template get<1>(i)[1] = 7.8 + i; vg_data.template get<1>(i)[2] = 9.0 + i; vg_data2.template get<0>(i) = 8.5 + i; vg_data2.template get<1>(i)[0] = 1.6 + i; vg_data2.template get<1>(i)[1] = 3.8 + i; vg_data2.template get<1>(i)[2] = 5.1 + i; } vg_data.hostToDevice<0,1>(); vg_data2.hostToDevice<0,1>(); vg_data.add_prp_device<aggregate<float,float[3],float[3][3]>, CudaMemory, openfpm::grow_policy_double, OPENFPM_NATIVE, memory_traits_inte, 0,1>(vg_data2); vg_data.deviceToHost<0,1>(); BOOST_REQUIRE_EQUAL(vg_data.size(),200); bool match = true; for (unsigned int i = 100 ; i < 200 ; i++) { match &= vg_data.template get<0>(i) == vg_data2.template get<0>(i-100); match &= vg_data.template get<1>(i)[0] == vg_data2.template get<1>(i-100)[0]; match &= vg_data.template get<1>(i)[1] == vg_data2.template get<1>(i-100)[1]; match &= vg_data.template get<1>(i)[2] == vg_data2.template get<1>(i-100)[2]; } BOOST_REQUIRE_EQUAL(match,true); } BOOST_AUTO_TEST_CASE( vector_cuda_to_kernel_recursive2 ) { typedef openfpm::vector_gpu<aggregate<int,long int>> test1_type; typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<long int>>>> test2_type; typedef 
openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<Box<2,float>>>>> test3_type; typedef openfpm::vector<Box<3,float>,CudaMemory,memory_traits_inte> test4_type; typedef typename toKernel_transform<memory_traits_inte,test1_type>::type tker1; typedef typename toKernel_transform<memory_traits_inte,test2_type>::type tker2; typedef typename toKernel_transform<memory_traits_inte,test3_type>::type tker3; typedef typename toKernel_transform<memory_traits_inte,test4_type>::type tker4; bool test = std::is_same<tker1,openfpm::vector_gpu_ker<aggregate<int, long>, memory_traits_inte>>::value; BOOST_REQUIRE_EQUAL(test,true); test = std::is_same<tker2,openfpm::vector_gpu_ker<aggregate<int, openfpm::vector_gpu_ker<aggregate<long>, memory_traits_inte> >, memory_traits_inte>>::value; BOOST_REQUIRE_EQUAL(test,true); test = std::is_same<tker3,openfpm::vector_gpu_ker<aggregate<int, openfpm::vector_gpu_ker<aggregate<Box<2,float>>, memory_traits_inte> >, memory_traits_inte>>::value; BOOST_REQUIRE_EQUAL(test,true); test = std::is_same<tker4,openfpm::vector_gpu_ker<Box<3,float>,memory_traits_inte>>::value; BOOST_REQUIRE_EQUAL(test,true); } template<typename vv_rc,typename vector_output_type> __global__ void kernel_recursive_check(vv_rc vvrc, vector_output_type vot) { int k = 0; for (int i = 0 ; i < vvrc.size() ; i++) { for (int j = 0 ; j < vvrc.template get<1>(i).size() ; j++) { vot.template get<0>(k) = vvrc.template get<1>(i).template get<0>(j); k++; } } } BOOST_AUTO_TEST_CASE( vector_cuda_to_kernel_recursive2_test_toKernel ) { typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<long int>>>> test2_type; typedef openfpm::vector_gpu<aggregate<int,openfpm::vector_gpu<aggregate<Box<2,float>>>>> test3_type; test2_type tt2; test3_type tt3; tt2.add_no_device(); tt2.add_no_device(); tt2.add_no_device(); /* tt3.add(); tt3.add(); tt3.add();*/ tt2.template get<0>(0) = 80; tt2.template get<1>(0).add(); tt2.template get<1>(0).template get<0>(0) = 500; tt2.template 
get<0>(0) = 180; tt2.template get<1>(0).add(); tt2.template get<1>(0).template get<0>(1) = 600; tt2.template get<0>(0) = 280;; tt2.template get<1>(0).add(); tt2.template get<1>(0).template get<0>(2) = 700; tt2.template get<1>(0).template hostToDevice<0>(); tt2.template get<0>(1) = 10080; tt2.template get<1>(1).add(); tt2.template get<1>(1).template get<0>(0) = 1500; tt2.template get<0>(1) = 20080; tt2.template get<1>(1).add(); tt2.template get<1>(1).template get<0>(1) = 1600; tt2.template get<0>(1) = 30080; tt2.template get<1>(1).add(); tt2.template get<1>(1).template get<0>(2) = 1700; tt2.template get<1>(1).template hostToDevice<0>(); tt2.template get<0>(2) = 40080; tt2.template get<1>(2).add(); tt2.template get<1>(2).template get<0>(0) = 2500; tt2.template get<0>(2) = 50080; tt2.template get<1>(2).add(); tt2.template get<1>(2).template get<0>(1) = 2600; tt2.template get<0>(2) = 60080; tt2.template get<1>(2).add(); tt2.template get<1>(2).template get<0>(2) = 2700; tt2.template get<1>(2).template hostToDevice<0>(); tt2.template hostToDevice<1>(); openfpm::vector_gpu<aggregate<long int>> vg; vg.resize(9); CUDA_LAUNCH_DIM3(kernel_recursive_check,1,1,tt2.toKernel(),vg.toKernel()); vg.template deviceToHost<0>(); BOOST_REQUIRE_EQUAL(vg.template get<0>(0),500); BOOST_REQUIRE_EQUAL(vg.template get<0>(1),600); BOOST_REQUIRE_EQUAL(vg.template get<0>(2),700); BOOST_REQUIRE_EQUAL(vg.template get<0>(3),1500); BOOST_REQUIRE_EQUAL(vg.template get<0>(4),1600); BOOST_REQUIRE_EQUAL(vg.template get<0>(5),1700); BOOST_REQUIRE_EQUAL(vg.template get<0>(6),2500); BOOST_REQUIRE_EQUAL(vg.template get<0>(7),2600); BOOST_REQUIRE_EQUAL(vg.template get<0>(8),2700); } BOOST_AUTO_TEST_CASE( vector_cuda_to_cpu_operator_equal ) { openfpm::vector_gpu<aggregate<int,int,double>> v1; openfpm::vector<aggregate<int,int,double>> v2; openfpm::vector<aggregate<int,int,double>,HeapMemory, memory_traits_inte > v3; openfpm::vector<aggregate<int,int,double>> v4; v2.resize(3000); for (size_t i = 0 ; i < 3000 ; 
i++) { v2.template get<0>(i) = i; v2.template get<1>(i) = i+300; v2.template get<2>(i) = i+6123.0; } v1 = v2; v3 = v2; v4 = v1; for (size_t i = 0 ; i < v2.size() ; i++) { BOOST_REQUIRE_EQUAL(v2.template get<0>(i),v1.template get<0>(i)); BOOST_REQUIRE_EQUAL(v2.template get<0>(i),v3.template get<0>(i)); BOOST_REQUIRE_EQUAL(v2.template get<0>(i),v4.template get<0>(i)); BOOST_REQUIRE_EQUAL(v2.template get<1>(i),v1.template get<1>(i)); BOOST_REQUIRE_EQUAL(v2.template get<1>(i),v3.template get<1>(i)); BOOST_REQUIRE_EQUAL(v2.template get<1>(i),v4.template get<1>(i)); BOOST_REQUIRE_EQUAL(v2.template get<2>(i),v1.template get<2>(i)); BOOST_REQUIRE_EQUAL(v2.template get<2>(i),v3.template get<2>(i)); BOOST_REQUIRE_EQUAL(v2.template get<2>(i),v4.template get<2>(i)); } } BOOST_AUTO_TEST_CASE( vector_cuda_host_to_device_check ) { openfpm::vector_gpu<aggregate<int,int,double>> v1; v1.resize(3); for (size_t i = 0 ; i < v1.size() ; i++) { v1.template get<0>(i) = i; v1.template get<1>(i) = i+300; v1.template get<2>(i) = i+6123.0; } v1.hostToDevice<0,1,2>(); // Now we reset the element 0, 1 for (size_t i = 0 ; i < v1.size()-1 ; i++) { v1.template get<0>(i) = 0; v1.template get<1>(i) = 0; v1.template get<2>(i) = 0; } v1.hostToDevice<0,1,2>(v1.size()-1,v1.size()-1); v1.deviceToHost<0,1,2>(); for (size_t i = 0 ; i < v1.size() ; i++) { BOOST_REQUIRE_EQUAL(v1.template get<0>(i),i); BOOST_REQUIRE_EQUAL(v1.template get<1>(i),i+300); BOOST_REQUIRE_EQUAL(v1.template get<2>(i),i+6123.0); } } BOOST_AUTO_TEST_CASE( vector_cuda_host_to_device_vector_and_point_tensor ) { openfpm::vector_gpu<aggregate<float[3],float[3][3]>> v1; v1.resize(100); for (size_t i = 0 ; i < 50 ; i++) { v1.template get<0>(i)[0] = i+1500; v1.template get<0>(i)[1] = i+2200; v1.template get<0>(i)[2] = i+2600; v1.template get<1>(i)[0][0] = i+6000; v1.template get<1>(i)[0][1] = i+7200; v1.template get<1>(i)[0][2] = i+8600; v1.template get<1>(i)[1][0] = i+9000; v1.template get<1>(i)[1][1] = i+10200; v1.template get<1>(i)[1][2] = 
i+11600; v1.template get<1>(i)[2][0] = i+12800; v1.template get<1>(i)[2][1] = i+22200; v1.template get<1>(i)[2][2] = i+23600; } v1.hostToDevice<0,1>(0,50); for (size_t i = 50 ; i < 100 ; i++) { v1.template get<0>(i)[0] = i+1500; v1.template get<0>(i)[1] = i+2200; v1.template get<0>(i)[2] = i+2600; v1.template get<1>(i)[0][0] = i+6000; v1.template get<1>(i)[0][1] = i+7200; v1.template get<1>(i)[0][2] = i+8600; v1.template get<1>(i)[1][0] = i+9000; v1.template get<1>(i)[1][1] = i+10200; v1.template get<1>(i)[1][2] = i+11600; v1.template get<1>(i)[2][0] = i+12800; v1.template get<1>(i)[2][1] = i+22200; v1.template get<1>(i)[2][2] = i+23600; } v1.hostToDevice<0,1>(50,99); v1.deviceToHost<0,1>(); for (size_t i = 0 ; i < 100 ; i++) { BOOST_REQUIRE_EQUAL(v1.template get<0>(i)[0],i+1500); BOOST_REQUIRE_EQUAL(v1.template get<0>(i)[1],i+2200); BOOST_REQUIRE_EQUAL(v1.template get<0>(i)[2],i+2600); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[0][0],i+6000); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[0][1],i+7200); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[0][2],i+8600); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[1][0],i+9000); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[1][1],i+10200); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[1][2],i+11600); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[2][0],i+12800); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[2][1],i+22200); BOOST_REQUIRE_EQUAL(v1.template get<1>(i)[2][2],i+23600); } } BOOST_AUTO_TEST_CASE( vector_cuda_copy ) { openfpm::vector_gpu<aggregate<float,float[3],float[3][3]>> v1; openfpm::vector_gpu<aggregate<float,float[3],float[3][3]>> v2; v1.resize(100); auto ite = v1.getIterator(); while (ite.isNext()) { auto p = ite.get(); v1.template get<0>(p) = p + 100; v1.template get<0>(p) = p + 2000; v1.template get<0>(p) = p + 3000; v1.template get<0>(p) = p + 4000; v1.template get<1>(p)[0] = p + 5000; v1.template get<1>(p)[1] = p + 6000; v1.template get<1>(p)[2] = p + 7000; v1.template get<2>(p)[0][0] = p + 8000; v1.template get<2>(p)[0][1] = p + 
9000; v1.template get<2>(p)[0][2] = p + 10000; v1.template get<2>(p)[1][0] = p + 11000; v1.template get<2>(p)[1][1] = p + 12000; v1.template get<2>(p)[2][2] = p + 13000; v1.template get<2>(p)[2][0] = p + 14000; v1.template get<2>(p)[2][1] = p + 15000; v1.template get<2>(p)[2][2] = p + 16000; ++ite; } v1.hostToDevice<0,1,2>(); ite = v1.getIterator(); while (ite.isNext()) { auto p = ite.get(); v1.template get<0>(p) = p + 6100; v1.template get<0>(p) = p + 62000; v1.template get<0>(p) = p + 63000; v1.template get<0>(p) = p + 64000; v1.template get<1>(p)[0] = p + 65000; v1.template get<1>(p)[1] = p + 66000; v1.template get<1>(p)[2] = p + 67000; v1.template get<2>(p)[0][0] = p + 68000; v1.template get<2>(p)[0][1] = p + 69000; v1.template get<2>(p)[0][2] = p + 610000; v1.template get<2>(p)[1][0] = p + 611000; v1.template get<2>(p)[1][1] = p + 612000; v1.template get<2>(p)[2][2] = p + 613000; v1.template get<2>(p)[2][0] = p + 614000; v1.template get<2>(p)[2][1] = p + 615000; v1.template get<2>(p)[2][2] = p + 616000; ++ite; } v2 = v1; // first check the CPU bool match = true; ite = v2.getIterator(); while (ite.isNext()) { auto p = ite.get(); match = v2.template get<0>(p) == p + 6100; match = v2.template get<0>(p) == p + 62000; match = v2.template get<0>(p) == p + 63000; match = v2.template get<0>(p) == p + 64000; match = v2.template get<1>(p)[0] == p + 65000; match = v2.template get<1>(p)[1] == p + 66000; match = v2.template get<1>(p)[2] == p + 67000; match = v2.template get<2>(p)[0][0] == p + 68000; match = v2.template get<2>(p)[0][1] == p + 69000; match = v2.template get<2>(p)[0][2] == p + 610000; match = v2.template get<2>(p)[1][0] == p + 611000; match = v2.template get<2>(p)[1][1] == p + 612000; match = v2.template get<2>(p)[2][2] == p + 613000; match = v2.template get<2>(p)[2][0] == p + 614000; match = v2.template get<2>(p)[2][1] == p + 615000; match = v2.template get<2>(p)[2][2] == p + 616000; ++ite; } BOOST_REQUIRE_EQUAL(match,true); v2.deviceToHost<0,1,2>(); ite = 
v2.getIterator(); while (ite.isNext()) { auto p = ite.get(); match = v2.template get<0>(p) == p + 100; match = v2.template get<0>(p) == p + 2000; match = v2.template get<0>(p) == p + 3000; match = v2.template get<0>(p) == p + 4000; match = v2.template get<1>(p)[0] == p + 5000; match = v2.template get<1>(p)[1] == p + 6000; match = v2.template get<1>(p)[2] == p + 7000; match = v2.template get<2>(p)[0][0] == p + 8000; match = v2.template get<2>(p)[0][1] == p + 9000; match = v2.template get<2>(p)[0][2] == p + 10000; match = v2.template get<2>(p)[1][0] == p + 11000; match = v2.template get<2>(p)[1][1] == p + 12000; match = v2.template get<2>(p)[2][2] == p + 13000; match = v2.template get<2>(p)[2][0] == p + 14000; match = v2.template get<2>(p)[2][1] == p + 15000; match = v2.template get<2>(p)[2][2] == p + 16000; if (match == false) { std::cout << v2.template get<0>(p) << std::endl; } ++ite; } BOOST_REQUIRE_EQUAL(match,true); } BOOST_AUTO_TEST_SUITE_END()
2e5d5276d0e236ee2ae5e4dba0b65dc7c016d52c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> int main() { int devcount; hipGetDeviceCount(&devcount); printf("%i device(s) found...", devcount); return 0; }
2e5d5276d0e236ee2ae5e4dba0b65dc7c016d52c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> int main() { int devcount; cudaGetDeviceCount(&devcount); printf("%i device(s) found...", devcount); return 0; }
9085188e787d2bcf4cf66f93169c6d38f19ff7b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" namespace cv { namespace cuda { namespace device { namespace imgproc { template <typename T> __global__ void pyrUp(const PtrStepSz<T> src, PtrStepSz<T> dst) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ sum_t s_srcPatch[10][10]; __shared__ sum_t s_dstPatch[20][16]; if (threadIdx.x < 10 && threadIdx.y < 10) { int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1; int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1; srcx = ::abs(srcx); srcx = ::min(src.cols - 1, srcx); srcy = ::abs(srcy); srcy = ::min(src.rows - 1, srcy); s_srcPatch[threadIdx.y][threadIdx.x] = saturate_cast<sum_t>(src(srcy, srcx)); } __syncthreads(); sum_t sum = VecTraits<sum_t>::all(0); const int evenFlag = static_cast<int>((threadIdx.x & 1) == 0); const int oddFlag = static_cast<int>((threadIdx.x & 1) != 0); const bool eveny = ((threadIdx.y & 1) == 0); const int tidx = threadIdx.x; if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * 
s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 2) >> 1)]; } s_dstPatch[2 + threadIdx.y][threadIdx.x] = sum; if (threadIdx.y < 2) { sum = VecTraits<sum_t>::all(0); if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[0][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx + 2) >> 1)]; } s_dstPatch[threadIdx.y][threadIdx.x] = sum; } if (threadIdx.y > 13) { sum = VecTraits<sum_t>::all(0); if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[9][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx + 2) >> 1)]; } s_dstPatch[4 + threadIdx.y][threadIdx.x] = sum; } __syncthreads(); sum = VecTraits<sum_t>::all(0); const int tidy = threadIdx.y; sum = sum + 0.0625f * s_dstPatch[2 + tidy - 2][threadIdx.x]; sum = sum + 0.25f * s_dstPatch[2 + tidy - 1][threadIdx.x]; sum = sum + 0.375f * s_dstPatch[2 + tidy ][threadIdx.x]; sum = sum + 0.25f * s_dstPatch[2 + tidy + 1][threadIdx.x]; sum = sum + 0.0625f * s_dstPatch[2 + tidy + 2][threadIdx.x]; if (x < dst.cols && y < dst.rows) dst(y, x) = saturate_cast<T>(4.0f * sum); } template <typename T> void pyrUp_caller(PtrStepSz<T> src, PtrStepSz<T> dst, hipStream_t stream) { const dim3 block(16, 16); const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); 
hipLaunchKernelGGL(( pyrUp), dim3(grid), dim3(block), 0, stream, src, dst); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template <typename T> void pyrUp_gpu(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream) { pyrUp_caller<T>(static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(dst), stream); } template void pyrUp_gpu<uchar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<uchar2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<uchar3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<uchar4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<schar>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<char2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<char3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<char4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<ushort>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<ushort2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<ushort3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<ushort4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<short>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<short2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<short3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<short4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<int>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<int2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<int3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void 
pyrUp_gpu<int4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<float>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); //template void pyrUp_gpu<float2>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<float3>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); template void pyrUp_gpu<float4>(PtrStepSzb src, PtrStepSzb dst, hipStream_t stream); } // namespace imgproc }}} // namespace cv { namespace cuda { namespace cudev #endif /* CUDA_DISABLER */
9085188e787d2bcf4cf66f93169c6d38f19ff7b8.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/border_interpolate.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/vec_math.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" namespace cv { namespace cuda { namespace device { namespace imgproc { template <typename T> __global__ void pyrUp(const PtrStepSz<T> src, PtrStepSz<T> dst) { typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ sum_t s_srcPatch[10][10]; __shared__ sum_t s_dstPatch[20][16]; if (threadIdx.x < 10 && threadIdx.y < 10) { int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1; int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1; srcx = ::abs(srcx); srcx = ::min(src.cols - 1, srcx); srcy = ::abs(srcy); srcy = ::min(src.rows - 1, srcy); s_srcPatch[threadIdx.y][threadIdx.x] = saturate_cast<sum_t>(src(srcy, srcx)); } __syncthreads(); sum_t sum = VecTraits<sum_t>::all(0); const int evenFlag = static_cast<int>((threadIdx.x & 1) == 0); const int oddFlag = static_cast<int>((threadIdx.x & 1) != 0); const bool eveny = ((threadIdx.y & 1) == 0); const int tidx = threadIdx.x; if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * 
s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 2) >> 1)]; } s_dstPatch[2 + threadIdx.y][threadIdx.x] = sum; if (threadIdx.y < 2) { sum = VecTraits<sum_t>::all(0); if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[0][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx + 2) >> 1)]; } s_dstPatch[threadIdx.y][threadIdx.x] = sum; } if (threadIdx.y > 13) { sum = VecTraits<sum_t>::all(0); if (eveny) { sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx - 2) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx - 1) >> 1)]; sum = sum + (evenFlag * 0.375f ) * s_srcPatch[9][1 + ((tidx ) >> 1)]; sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx + 1) >> 1)]; sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx + 2) >> 1)]; } s_dstPatch[4 + threadIdx.y][threadIdx.x] = sum; } __syncthreads(); sum = VecTraits<sum_t>::all(0); const int tidy = threadIdx.y; sum = sum + 0.0625f * s_dstPatch[2 + tidy - 2][threadIdx.x]; sum = sum + 0.25f * s_dstPatch[2 + tidy - 1][threadIdx.x]; sum = sum + 0.375f * s_dstPatch[2 + tidy ][threadIdx.x]; sum = sum + 0.25f * s_dstPatch[2 + tidy + 1][threadIdx.x]; sum = sum + 0.0625f * s_dstPatch[2 + tidy + 2][threadIdx.x]; if (x < dst.cols && y < dst.rows) dst(y, x) = saturate_cast<T>(4.0f * sum); } template <typename T> void pyrUp_caller(PtrStepSz<T> src, PtrStepSz<T> dst, cudaStream_t stream) { const dim3 block(16, 16); const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); 
pyrUp<<<grid, block, 0, stream>>>(src, dst); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template <typename T> void pyrUp_gpu(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream) { pyrUp_caller<T>(static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(dst), stream); } template void pyrUp_gpu<uchar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<uchar2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<uchar3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<uchar4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<schar>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<char2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<char3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<char4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<ushort>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<ushort2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<ushort3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<ushort4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<short>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<short2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<short3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<short4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<int>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<int2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<int3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void 
pyrUp_gpu<int4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<float>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); //template void pyrUp_gpu<float2>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<float3>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); template void pyrUp_gpu<float4>(PtrStepSzb src, PtrStepSzb dst, cudaStream_t stream); } // namespace imgproc }}} // namespace cv { namespace cuda { namespace cudev #endif /* CUDA_DISABLER */
af14c3f3f9bff3c18409bf6031fef37c040c79bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/memory/allocation/allocator.h> #include "hipcub/hipcub.hpp" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 64; static constexpr int kNumMaxinumNumBlocks = 4096; const int kBBoxSize = 4; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } static __global__ void GetLengthLoD(const int nthreads, const int* batch_ids, int* length_lod) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (nthreads); i += blockDim.x * gridDim.x) { platform::CudaAtomicAdd(length_lod + batch_ids[i], 1); } } template <typename DeviceContext, typename T> class GPUCollectFpnProposalsOpKernel : public 
framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto roi_ins = ctx.MultiInput<LoDTensor>("MultiLevelRois"); const auto score_ins = ctx.MultiInput<LoDTensor>("MultiLevelScores"); auto fpn_rois = ctx.Output<LoDTensor>("FpnRois"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); const int post_nms_topN = ctx.Attr<int>("post_nms_topN"); // concat inputs along axis = 0 int roi_offset = 0; int score_offset = 0; int total_roi_num = 0; for (size_t i = 0; i < roi_ins.size(); ++i) { total_roi_num += roi_ins[i]->dims()[0]; } int real_post_num = min(post_nms_topN, total_roi_num); fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor concat_rois; Tensor concat_scores; T* concat_rois_data = concat_rois.mutable_data<T>( {total_roi_num, kBBoxSize}, dev_ctx.GetPlace()); T* concat_scores_data = concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace()); Tensor roi_batch_id_list; roi_batch_id_list.Resize({total_roi_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); int index = 0; int lod_size; auto place = BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()); for (size_t i = 0; i < roi_ins.size(); ++i) { auto roi_in = roi_ins[i]; auto score_in = score_ins[i]; auto roi_lod = roi_in->lod().back(); lod_size = roi_lod.size() - 1; for (size_t n = 0; n < lod_size; ++n) { for (size_t j = roi_lod[n]; j < roi_lod[n + 1]; ++j) { roi_batch_id_data[index++] = n; } } memory::Copy(place, concat_rois_data + roi_offset, place, roi_in->data<T>(), roi_in->numel() * sizeof(T), dev_ctx.stream()); memory::Copy(place, concat_scores_data + score_offset, place, score_in->data<T>(), score_in->numel() * sizeof(T), dev_ctx.stream()); roi_offset += roi_in->numel(); score_offset += score_in->numel(); } // copy batch id list to GPU Tensor roi_batch_id_list_gpu; framework::TensorCopy(roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu); Tensor 
index_in_t; int* idx_in = index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range_total( dev_ctx, total_roi_num); for_range_total(RangeInitFunctor{0, 1, idx_in}); Tensor keys_out_t; T* keys_out = keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace()); Tensor index_out_t; int* idx_out = index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairsDescending<T, int>( nullptr, temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num); // Allocate temporary storage auto d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort score to get corresponding index hipcub::DeviceRadixSort::SortPairsDescending<T, int>( d_temp_storage->ptr(), temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num); index_out_t.Resize({real_post_num}); Tensor sorted_rois; sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor sorted_batch_id; sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois); GPUGather<int>(dev_ctx, roi_batch_id_list_gpu, index_out_t, &sorted_batch_id); Tensor batch_index_t; int* batch_idx_in = batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range_post( dev_ctx, real_post_num); for_range_post(RangeInitFunctor{0, 1, batch_idx_in}); Tensor out_id_t; int* out_id_data = out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs<int, int>( nullptr, temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num); // Allocate temporary storage 
d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort batch_id to get corresponding index hipcub::DeviceRadixSort::SortPairs<int, int>( d_temp_storage->ptr(), temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num); GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois); Tensor length_lod; int* length_lod_data = length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, int> set_zero; set_zero(dev_ctx, &length_lod, static_cast<int>(0)); int blocks = NumBlocks(real_post_num); int threads = kNumCUDAThreads; // get length-based lod by batch ids hipLaunchKernelGGL(( GetLengthLoD), dim3(blocks), dim3(threads), 0, 0, real_post_num, out_id_data, length_lod_data); std::vector<int> length_lod_cpu(lod_size); memory::Copy(platform::CPUPlace(), length_lod_cpu.data(), place, length_lod_data, sizeof(int) * lod_size, dev_ctx.stream()); dev_ctx.Wait(); std::vector<size_t> offset(1, 0); for (int i = 0; i < lod_size; ++i) { offset.emplace_back(offset.back() + length_lod_cpu[i]); } framework::LoD lod; lod.emplace_back(offset); fpn_rois->set_lod(lod); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( collect_fpn_proposals, ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, double>);
af14c3f3f9bff3c18409bf6031fef37c040c79bf.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <paddle/fluid/memory/allocation/allocator.h> #include "cub/cub.cuh" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/operators/detection/bbox_util.h" #include "paddle/fluid/operators/detection/collect_fpn_proposals_op.h" #include "paddle/fluid/operators/gather.cu.h" #include "paddle/fluid/operators/math/concat_and_split.h" #include "paddle/fluid/operators/strided_memcpy.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 64; static constexpr int kNumMaxinumNumBlocks = 4096; const int kBBoxSize = 4; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } static __global__ void GetLengthLoD(const int nthreads, const int* batch_ids, int* length_lod) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (nthreads); i += blockDim.x * gridDim.x) { platform::CudaAtomicAdd(length_lod + batch_ids[i], 1); } } template <typename DeviceContext, typename T> class GPUCollectFpnProposalsOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const 
auto roi_ins = ctx.MultiInput<LoDTensor>("MultiLevelRois"); const auto score_ins = ctx.MultiInput<LoDTensor>("MultiLevelScores"); auto fpn_rois = ctx.Output<LoDTensor>("FpnRois"); auto& dev_ctx = ctx.template device_context<DeviceContext>(); const int post_nms_topN = ctx.Attr<int>("post_nms_topN"); // concat inputs along axis = 0 int roi_offset = 0; int score_offset = 0; int total_roi_num = 0; for (size_t i = 0; i < roi_ins.size(); ++i) { total_roi_num += roi_ins[i]->dims()[0]; } int real_post_num = min(post_nms_topN, total_roi_num); fpn_rois->mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor concat_rois; Tensor concat_scores; T* concat_rois_data = concat_rois.mutable_data<T>( {total_roi_num, kBBoxSize}, dev_ctx.GetPlace()); T* concat_scores_data = concat_scores.mutable_data<T>({total_roi_num, 1}, dev_ctx.GetPlace()); Tensor roi_batch_id_list; roi_batch_id_list.Resize({total_roi_num}); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(platform::CPUPlace()); int index = 0; int lod_size; auto place = BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()); for (size_t i = 0; i < roi_ins.size(); ++i) { auto roi_in = roi_ins[i]; auto score_in = score_ins[i]; auto roi_lod = roi_in->lod().back(); lod_size = roi_lod.size() - 1; for (size_t n = 0; n < lod_size; ++n) { for (size_t j = roi_lod[n]; j < roi_lod[n + 1]; ++j) { roi_batch_id_data[index++] = n; } } memory::Copy(place, concat_rois_data + roi_offset, place, roi_in->data<T>(), roi_in->numel() * sizeof(T), dev_ctx.stream()); memory::Copy(place, concat_scores_data + score_offset, place, score_in->data<T>(), score_in->numel() * sizeof(T), dev_ctx.stream()); roi_offset += roi_in->numel(); score_offset += score_in->numel(); } // copy batch id list to GPU Tensor roi_batch_id_list_gpu; framework::TensorCopy(roi_batch_id_list, dev_ctx.GetPlace(), &roi_batch_id_list_gpu); Tensor index_in_t; int* idx_in = index_in_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); 
platform::ForRange<platform::CUDADeviceContext> for_range_total( dev_ctx, total_roi_num); for_range_total(RangeInitFunctor{0, 1, idx_in}); Tensor keys_out_t; T* keys_out = keys_out_t.mutable_data<T>({total_roi_num}, dev_ctx.GetPlace()); Tensor index_out_t; int* idx_out = index_out_t.mutable_data<int>({total_roi_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairsDescending<T, int>( nullptr, temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num); // Allocate temporary storage auto d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort score to get corresponding index cub::DeviceRadixSort::SortPairsDescending<T, int>( d_temp_storage->ptr(), temp_storage_bytes, concat_scores.data<T>(), keys_out, idx_in, idx_out, total_roi_num); index_out_t.Resize({real_post_num}); Tensor sorted_rois; sorted_rois.mutable_data<T>({real_post_num, kBBoxSize}, dev_ctx.GetPlace()); Tensor sorted_batch_id; sorted_batch_id.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); GPUGather<T>(dev_ctx, concat_rois, index_out_t, &sorted_rois); GPUGather<int>(dev_ctx, roi_batch_id_list_gpu, index_out_t, &sorted_batch_id); Tensor batch_index_t; int* batch_idx_in = batch_index_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); platform::ForRange<platform::CUDADeviceContext> for_range_post( dev_ctx, real_post_num); for_range_post(RangeInitFunctor{0, 1, batch_idx_in}); Tensor out_id_t; int* out_id_data = out_id_t.mutable_data<int>({real_post_num}, dev_ctx.GetPlace()); // Determine temporary device storage requirements temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs<int, int>( nullptr, temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num); // Allocate temporary storage d_temp_storage = memory::Alloc(place, temp_storage_bytes); // Run sorting operation // sort batch_id to 
get corresponding index cub::DeviceRadixSort::SortPairs<int, int>( d_temp_storage->ptr(), temp_storage_bytes, sorted_batch_id.data<int>(), out_id_data, batch_idx_in, index_out_t.data<int>(), real_post_num); GPUGather<T>(dev_ctx, sorted_rois, index_out_t, fpn_rois); Tensor length_lod; int* length_lod_data = length_lod.mutable_data<int>({lod_size}, dev_ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, int> set_zero; set_zero(dev_ctx, &length_lod, static_cast<int>(0)); int blocks = NumBlocks(real_post_num); int threads = kNumCUDAThreads; // get length-based lod by batch ids GetLengthLoD<<<blocks, threads>>>(real_post_num, out_id_data, length_lod_data); std::vector<int> length_lod_cpu(lod_size); memory::Copy(platform::CPUPlace(), length_lod_cpu.data(), place, length_lod_data, sizeof(int) * lod_size, dev_ctx.stream()); dev_ctx.Wait(); std::vector<size_t> offset(1, 0); for (int i = 0; i < lod_size; ++i) { offset.emplace_back(offset.back() + length_lod_cpu[i]); } framework::LoD lod; lod.emplace_back(offset); fpn_rois->set_lod(lod); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( collect_fpn_proposals, ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUCollectFpnProposalsOpKernel<paddle::platform::CUDADeviceContext, double>);
700b3b6b1977d200a6e66ba9b8dae2020977388e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_revert_kernel; int xdim0_revert_kernel_h = -1; __constant__ int ydim0_revert_kernel; int ydim0_revert_kernel_h = -1; __constant__ int xdim1_revert_kernel; int xdim1_revert_kernel_h = -1; __constant__ int ydim1_revert_kernel; int ydim1_revert_kernel_h = -1; __constant__ int xdim2_revert_kernel; int xdim2_revert_kernel_h = -1; __constant__ int ydim2_revert_kernel; int ydim2_revert_kernel_h = -1; __constant__ int xdim3_revert_kernel; int xdim3_revert_kernel_h = -1; __constant__ int ydim3_revert_kernel; int ydim3_revert_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_revert_kernel*(y)+xdim0_revert_kernel*ydim0_revert_kernel*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_revert_kernel*(y)+xdim1_revert_kernel*ydim1_revert_kernel*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_revert_kernel*(y)+xdim2_revert_kernel*ydim2_revert_kernel*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_revert_kernel*(y)+xdim3_revert_kernel*ydim3_revert_kernel*(z)) //user function __device__ void revert_kernel_gpu( const double *density0, double *density1, const double *energy0, double *energy1) { density1[OPS_ACC1(0,0,0)] = density0[OPS_ACC0(0,0,0)]; energy1[OPS_ACC3(0,0,0)] = energy0[OPS_ACC2(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_revert_kernel( const double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_revert_kernel + idx_z * 1*1 * xdim0_revert_kernel * ydim0_revert_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_revert_kernel + idx_z * 1*1 * xdim1_revert_kernel * 
ydim1_revert_kernel; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_revert_kernel + idx_z * 1*1 * xdim2_revert_kernel * ydim2_revert_kernel; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_revert_kernel + idx_z * 1*1 * xdim3_revert_kernel * ydim3_revert_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { revert_kernel_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_revert_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,104)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(104,"revert_kernel"); OPS_kernels[104].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 
= args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_revert_kernel_h || ydim0 != ydim0_revert_kernel_h || xdim1 != xdim1_revert_kernel_h || ydim1 != ydim1_revert_kernel_h || xdim2 != xdim2_revert_kernel_h || ydim2 != ydim2_revert_kernel_h || xdim3 != xdim3_revert_kernel_h || ydim3 != ydim3_revert_kernel_h) { hipMemcpyToSymbol( xdim0_revert_kernel, &xdim0, sizeof(int) ); xdim0_revert_kernel_h = xdim0; hipMemcpyToSymbol( ydim0_revert_kernel, &ydim0, sizeof(int) ); ydim0_revert_kernel_h = ydim0; hipMemcpyToSymbol( xdim1_revert_kernel, &xdim1, sizeof(int) ); xdim1_revert_kernel_h = xdim1; hipMemcpyToSymbol( ydim1_revert_kernel, &ydim1, sizeof(int) ); ydim1_revert_kernel_h = ydim1; hipMemcpyToSymbol( xdim2_revert_kernel, &xdim2, sizeof(int) ); xdim2_revert_kernel_h = xdim2; hipMemcpyToSymbol( ydim2_revert_kernel, &ydim2, sizeof(int) ); ydim2_revert_kernel_h = ydim2; hipMemcpyToSymbol( xdim3_revert_kernel, &xdim3, sizeof(int) ); xdim3_revert_kernel_h = xdim3; hipMemcpyToSymbol( ydim3_revert_kernel, &ydim3, sizeof(int) ); ydim3_revert_kernel_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[104].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_revert_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[104].time += t1-t2; 
} #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[3],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[104].mpi_time += t2-t1; OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 104; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 104; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_revert_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(104,"revert_kernel"); } ops_enqueue_kernel(desc); } #endif
700b3b6b1977d200a6e66ba9b8dae2020977388e.cu
// // auto-generated by ops.py // __constant__ int xdim0_revert_kernel; int xdim0_revert_kernel_h = -1; __constant__ int ydim0_revert_kernel; int ydim0_revert_kernel_h = -1; __constant__ int xdim1_revert_kernel; int xdim1_revert_kernel_h = -1; __constant__ int ydim1_revert_kernel; int ydim1_revert_kernel_h = -1; __constant__ int xdim2_revert_kernel; int xdim2_revert_kernel_h = -1; __constant__ int ydim2_revert_kernel; int ydim2_revert_kernel_h = -1; __constant__ int xdim3_revert_kernel; int xdim3_revert_kernel_h = -1; __constant__ int ydim3_revert_kernel; int ydim3_revert_kernel_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_revert_kernel*(y)+xdim0_revert_kernel*ydim0_revert_kernel*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_revert_kernel*(y)+xdim1_revert_kernel*ydim1_revert_kernel*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_revert_kernel*(y)+xdim2_revert_kernel*ydim2_revert_kernel*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_revert_kernel*(y)+xdim3_revert_kernel*ydim3_revert_kernel*(z)) //user function __device__ void revert_kernel_gpu( const double *density0, double *density1, const double *energy0, double *energy1) { density1[OPS_ACC1(0,0,0)] = density0[OPS_ACC0(0,0,0)]; energy1[OPS_ACC3(0,0,0)] = energy0[OPS_ACC2(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_revert_kernel( const double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_revert_kernel + idx_z * 1*1 * xdim0_revert_kernel * ydim0_revert_kernel; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_revert_kernel + idx_z * 1*1 * xdim1_revert_kernel * ydim1_revert_kernel; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_revert_kernel + idx_z * 1*1 * 
xdim2_revert_kernel * ydim2_revert_kernel; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_revert_kernel + idx_z * 1*1 * xdim3_revert_kernel * ydim3_revert_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { revert_kernel_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_revert_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,104)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(104,"revert_kernel"); OPS_kernels[104].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; 
int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_revert_kernel_h || ydim0 != ydim0_revert_kernel_h || xdim1 != xdim1_revert_kernel_h || ydim1 != ydim1_revert_kernel_h || xdim2 != xdim2_revert_kernel_h || ydim2 != ydim2_revert_kernel_h || xdim3 != xdim3_revert_kernel_h || ydim3 != ydim3_revert_kernel_h) { cudaMemcpyToSymbol( xdim0_revert_kernel, &xdim0, sizeof(int) ); xdim0_revert_kernel_h = xdim0; cudaMemcpyToSymbol( ydim0_revert_kernel, &ydim0, sizeof(int) ); ydim0_revert_kernel_h = ydim0; cudaMemcpyToSymbol( xdim1_revert_kernel, &xdim1, sizeof(int) ); xdim1_revert_kernel_h = xdim1; cudaMemcpyToSymbol( ydim1_revert_kernel, &ydim1, sizeof(int) ); ydim1_revert_kernel_h = ydim1; cudaMemcpyToSymbol( xdim2_revert_kernel, &xdim2, sizeof(int) ); xdim2_revert_kernel_h = xdim2; cudaMemcpyToSymbol( ydim2_revert_kernel, &ydim2, sizeof(int) ); ydim2_revert_kernel_h = ydim2; cudaMemcpyToSymbol( xdim3_revert_kernel, &xdim3, sizeof(int) ); xdim3_revert_kernel_h = xdim3; cudaMemcpyToSymbol( ydim3_revert_kernel, &ydim3, sizeof(int) ); ydim3_revert_kernel_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[104].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_revert_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[104].time += t1-t2; } #ifndef OPS_LAZY 
ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[3],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[104].mpi_time += t2-t1; OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[104].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_revert_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 104; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 104; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_revert_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(104,"revert_kernel"); } ops_enqueue_kernel(desc); } #endif
5f17ffabac94cae8897d161ceb1fd94692f0b0bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /**************************************************************************** ** ** This software was developed by Javier Gonzalez on Feb 2018 ** ** This class controls a set of GPUs to perform digital image correlation ** ****************************************************************************/ #include "cuda_class.cuh" CudaClass::CudaClass() { #if DEBUG_CUDA printf("CudaClass::CudaClass\n"); #endif hipHostMalloc((void **)&pinnedChi, sizeof(float)); hipHostMalloc((void **)&lastGoodChi, sizeof(float)); } CudaClass::~CudaClass() { #if DEBUG_CUDA printf("CudaClass::~CudaClass\n"); #endif if (correlationStream) { hipError_t err = hipSuccess; err = hipStreamDestroy(correlationStream); if (err != hipSuccess) { printf("Failed to Destroy correlationStream (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } hipHostFree(pinnedChi); hipHostFree(lastGoodChi); } int CudaClass::initialize() { // This method returns the number of devices to MainApp, and MainApp // will disable the GPU mode if the number of devices is 0. If the GPU // mode is disabled, there will be no more calls to this class. // I decided not to include an if statement in every method of this // class to check on this. 
if (initialize_devices) { // Initialize devices only once initialize_devices = false; // Calls every GPU to reduce initial latency ( control when it happens ) hipError_t err = hipGetDeviceCount(&deviceCount); if (err != hipSuccess) { printf("Failed to count GPUs (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } #if _WINDOWS deviceCount = 1; printf("CudaClass::initialize Using only %d GPU on WINDOWS compile\n", deviceCount); #endif devicesAvailable = deviceCount; err = hipStreamCreate(&correlationStream); if (err != hipSuccess) { printf("Failed to create correlationStream (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } return devicesAvailable; } void CudaClass::set_deviceCount(int deviceCount_in) { deviceCount = deviceCount_in; #if DEBUG_CUDA printf("CUDA: set_deviceCount: Using %d devices\n", deviceCount); #endif } void CudaClass::set_fitting_model(fittingModelEnum fittingModel_in) { fittingModel = fittingModel_in; number_of_model_parameters = ModelClass::get_number_of_model_parameters(fittingModel); } void CudaClass::set_interpolation_model( interpolationModelEnum interpolationModel_in) { interpolationModel = interpolationModel_in; } void CudaClass::set_max_iters(int maximum_iterations_) { maximum_iterations = maximum_iterations_; } void CudaClass::set_precision(float required_precision_) { required_precision = required_precision_; } CorrelationResult *CudaClass::correlate(int iSector, float *initial_guess_, frame_results &results) { #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate\n"); printf("CudaClass::correlate: Initial_guess:\n"); int p = ModelClass::get_number_of_model_parameters(fittingModel); for (int p = 0; p < number_of_model_parameters; ++p) { printf("%14.5e\t", initial_guess_[p]); } printf("\n"); #endif errorEnum errorCode = error_none; float min_lambda = 1e-9f; float max_lambda = 1e9f; int totalIterations = 0; // Upload initialGuess for level0 to the GPU via the pyramid/polygon object // initialGuess 
-> lastGoodParameters , tentativeParameters , savedParameters cudaPyramidManager.initializeParametersLevel0(iSector, initial_guess_); int pyramid_start = cudaPyramidManager.getPyramidStart(); int pyramid_step = cudaPyramidManager.getPyramidStep(); int pyramid_stop = cudaPyramidManager.getPyramidStop(); for (int pyramidLevel = pyramid_stop; pyramidLevel >= pyramid_start; pyramidLevel -= pyramid_step) { // Put a marker on the nvvp CUDA profiler roctxRangePushA("CudaClass::correlate level"); float lambda = 0.0001f; // Scale lastGood for this pyramid level. tentative and savedParameters are // copied from lastGood cudaPyramidManager.scaleParametersForLevel(iSector, pyramidLevel); //########################################################################################### // // Find initial chi from the initial guess. Make that the lastGoodChi and // start the iteration // // lastGoodParameters -> NR -> tentativeParameters AND lastGoodChi = chi( // lastGoodParameters ) findNewParameters(iSector, pyramidLevel, parType_lastGood, parType_saved, lastGoodChi, lambda); #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate Initial chi is %f\n", *lastGoodChi); #endif bool useSavedParameters = true; //########################################################################################### // // Iteration to find correlation coefficients // for (int iteration = 1; iteration <= maximum_iterations + 1; ++iteration) { #if DEBUG_NEWTON_RAPHSON_CUDA printf("\n\nCudaClass::correlate Starting iteration %d, pyramid level " "%d, sector = %d\n", iteration, pyramidLevel, iSector); #endif if (iteration > maximum_iterations || lambda >= max_lambda) { errorCode = error_correlation_max_iters_reached; break; } else { totalIterations++; } //########################################################################################### // // Find tentative parameter set - We are saving parameters from the last // chi computation that we // use if the corresponding chi was better than the 
"lastGoodChi". // This happens most iterations and saves time. However, if the // corresponding chi is // worst, then we recompute the parameters. // if (useSavedParameters) { #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate Using saved correlation coefficients\n"); #endif cudaPyramidManager.transferParameters(iSector, parType_saved, parType_tentative); } else { #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate Finding next correlation coefficients the " "hard way\n"); #endif // Recompute tentativeParameters from the lastGoodParameters using a // larger lambda findNewParameters(iSector, pyramidLevel, parType_lastGood, parType_tentative, pinnedChi, lambda); } // else - finding new parameter set //########################################################################################### // // Computation of CHI FOR THE TENTATIVE set of model parameters // Compute chi associated with the new tentativeParameters findNewParameters(iSector, pyramidLevel, parType_tentative, parType_saved, pinnedChi, lambda); #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate New chi is %f\n", pinnedChi); #endif // Compares delta chi based on lastGoodChi(last_good_model_parameters) // and // chi(model_parameters). 
However, it does not act on this info until // the // parameters are updated // Make sure the last chi is already available hipStreamSynchronize(correlationStream); float delta_chi = std::abs((*lastGoodChi - *pinnedChi) / (fmaxf(*lastGoodChi, *pinnedChi) + required_precision)); #if DEBUG_NEWTON_RAPHSON_CUDA printf("CUDA iteration = %4d: ", iteration); printf("last good chi: %12.4e: ", *lastGoodChi); printf("new chi: %12.4e delta chi: %12.4e lambda: %12.4e " "required_precission = %f\n", *pinnedChi, delta_chi, lambda, required_precision); #endif if (*pinnedChi <= *lastGoodChi) // converging step - record the new "best" parameters { *lastGoodChi = *pinnedChi; lambda = fmaxf(lambda * 0.4f, min_lambda); // tentativeParameters (last result) -> lastGoodParameters cudaPyramidManager.transferParameters(iSector, parType_tentative, parType_lastGood); useSavedParameters = true; #if DEBUG_NEWTON_RAPHSON_CUDA printf(" # CONVERGING\n"); #endif } else // diverging step - increase lambda and revert to last "good" set // of parameters { lambda = fminf(lambda * 10.0f, max_lambda); useSavedParameters = false; #if DEBUG_NEWTON_RAPHSON_CUDA printf("\n"); #endif } // Was convergence reached? 
if (delta_chi < required_precision) { #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate Convergence reached in %d iterations at a " "delta_chi = %6f\n", totalIterations, delta_chi); #endif break; } } // iterations loop roctxRangePop(); } // Bring center, number of points and error status from the GPU to the CPU // with one transfer on host pinned memory CorrelationResult *cpuCorrelationResults = cudaPyramidManager.getCorrelationResultsToCPU(iSector); cpuCorrelationResults->chi = *lastGoodChi; cpuCorrelationResults->iterations = totalIterations; cpuCorrelationResults->errorCode = errorCode; memcpy(initial_guess_, cpuCorrelationResults->resultingParameters, number_of_model_parameters * sizeof(float)); return cpuCorrelationResults; } errorEnum CudaClass::findNewParameters(int iSector, int pyramidLevel, parameterTypeEnum parSrc, parameterTypeEnum parDst, float *chi, float lambda) { #if DEBUG_CUDA printf("CudaClass::findNewParameters: Start\n"); #endif errorEnum error = error_none; error = NewtonRaphsonStep(iSector, pyramidLevel, parSrc, lambda); if (error) return error; // Put a marker on the nvvp CUDA profiler roctxRangePushA("callCusolver"); error = cudaSolverManager.callCusolver(iSector, chi); if (error) return error_cuSolver; roctxRangePop(); // Put a marker on the nvvp CUDA profiler roctxRangePushA("updateParameters"); // Saves the parameter increment plus the tentativeParameters into // tentativeParameters via thrust operations on GPU cudaPyramidManager.updateParameters(iSector, number_of_model_parameters, parSrc, parDst, correlationStream); roctxRangePop(); return error_none; } errorEnum CudaClass::NewtonRaphsonStep(int iSector, int pyramidLevel, parameterTypeEnum parSrc, float lambda) { int iGPU = 0; errorEnum errorCode = error_none; // for ( int iGPU = 0 ; iGPU < deviceCount ; ++iGPU ) //{ hipError_t err = hipSetDevice(iGPU); if (err != hipSuccess) { printf("Failed to set device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } 
hipTextureObject_t undTexture = cudaPyramidManager.getUndTexture(pyramidLevel); hipTextureObject_t defTexture = cudaPyramidManager.getDefTexture(pyramidLevel); int numberOfPoints = cudaPyramidManager.getNumberOfPoints(iSector, pyramidLevel); float scaling = 1.f / ((float)numberOfPoints); float *undX_ptr = cudaPyramidManager.getUndXPtr(iSector, pyramidLevel); float *undY_ptr = cudaPyramidManager.getUndYPtr(iSector, pyramidLevel); float *undCenter = cudaPyramidManager.getUndCenter(iSector, pyramidLevel); float *globalABChi = cudaPyramidManager.getGlobalABChi(iSector); float *parameters = cudaPyramidManager.getParameters(iSector, parSrc); int blocksPerGrid = (numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int sharedMemorySize = sizeof(float) * (1 + number_of_model_parameters * (1 + number_of_model_parameters)) * THREADS_PER_BLOCK; #if DEBUG_CUDA_POLYGON printf("\nCudaClass::NewtonRaphsonStep Model numberOfPoints = %d , isector = " "%d , pyramidLevel = %d\n", numberOfPoints, iSector, pyramidLevel); printf("CudaClass::NewtonRaphsonStep parameters used\n"); float *h_par = new float[number_of_model_parameters]; hipMemcpy(h_par, parameters, number_of_model_parameters * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < number_of_model_parameters; ++i) { printf("%14.4e", h_par[i]); } printf("\n"); printf("CudaClass::NewtonRaphsonStep center used\n"); hipMemcpy(h_par, undCenter, 2 * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < 2; ++i) { printf("%14.4e", h_par[i]); } printf("\n"); fflush(stdout); delete[] h_par; #endif hipLaunchKernelGGL(( kCorrelation), dim3(blocksPerGrid), dim3(THREADS_PER_BLOCK), sharedMemorySize, correlationStream, parameters, fittingModel, interpolationModel, number_of_colors, undTexture, defTexture, numberOfPoints, undX_ptr, undY_ptr, undCenter, globalABChi); #if DEBUG_CUDA printf("CudaClass::NewtonRaphsonStep: kCorrelation kernel launched with %d " "blocks of %d threads with %zd bytes of shared memory per block\n", 
blocksPerGrid, THREADS_PER_BLOCK, sharedMemorySize); #endif err = hipGetLastError(); if (err != hipSuccess) { printf("Failed to launch correlation kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //} // // for ( int iGPU = 0 ; iGPU < deviceCount ; ++iGPU ) //{ #if DEBUG_CUDA printf("\nCudaClass::NewtonRaphsonStep Aggregation: numberOfPoints = %d in " "GPU %d \n", numberOfPoints, iGPU); #endif err = hipSetDevice(iGPU); if (err != hipSuccess) { printf("Failed to set device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //------------------------------------------------------------------------------------------- // // Launch second kernel many times to perform global aggregation of block // results // into globalABChi, layer by layer // //------------------------------------------------------------------------------------------- while (blocksPerGrid > 1) { int reducerBlocksPerGrid = (blocksPerGrid + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; // Every call reduces the number of ABchi by a factor of 256 ( // THREADS_PER_BLOCK ) hipLaunchKernelGGL(( k_global_reduction), dim3(reducerBlocksPerGrid), dim3(THREADS_PER_BLOCK), sharedMemorySize, correlationStream, blocksPerGrid, // s, globalABChi, number_of_model_parameters); err = hipGetLastError(); if (err != hipSuccess) { printf("Failed to launch global aggregation kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } blocksPerGrid = reducerBlocksPerGrid; } // Build LS problem in GPU0 hipLaunchKernelGGL(( k_build_LS_problem_in_GPU0), dim3(1), dim3(1), 0, correlationStream, globalABChi, number_of_model_parameters, scaling, lambda); //} //loop iGPU return errorCode; } // NewtonRaphsonStep void CudaClass::resetImagePyramids(const std::string undPath, const std::string defPath, const std::string nxtPath, colorEnum color_mode, const int start, const int step, const int stop) { cv::ImreadModes color_flag; switch (color_mode) { case color_monochrome: color_flag = 
cv::IMREAD_GRAYSCALE; break; case color_color: color_flag = cv::IMREAD_ANYCOLOR; break; default: assert(false); break; } // Put a marker on the nvvp CUDA profiler roctxRangePushA("CudaClass::resetImagePyramids read und image"); cv::Mat undCvImage = cv::imread(undPath, color_flag); roctxRangePop(); // Put a marker on the nvvp CUDA profiler roctxRangePushA("CudaClass::resetImagePyramids read def image"); cv::Mat defCvImage = cv::imread(defPath, color_flag); roctxRangePop(); cv::Mat nxtCvImage; if (!nxtPath.empty()) { // Put a marker on the nvvp CUDA profiler roctxRangePushA("CudaClass::resetImagePyramids read nxt image"); nxtCvImage = cv::imread(nxtPath, color_flag); roctxRangePop(); } number_of_colors = undCvImage.channels(); assert(number_of_colors == defCvImage.channels()); cudaPyramidManager.resetImagePyramids(undCvImage, defCvImage, nxtCvImage, start, step, stop); } void CudaClass::resetNextPyramid(const std::string nxtPath) { #if DEBUG_CUDA_PYRAMID printf("CudaClass::resetNextPyramid with %s\n", nxtPath.c_str()); #endif // Put a marker on the nvvp CUDA profiler roctxRangePushA("CudaClass::resetNextPyramid read image"); cv::Mat nxtCvImage; if (tempQ.empty()) { cv::ImreadModes color_flag; switch (number_of_colors) { case 1: color_flag = cv::IMREAD_GRAYSCALE; break; case 3: color_flag = cv::IMREAD_ANYCOLOR; break; default: assert(false); break; } nxtCvImage = cv::imread(nxtPath, color_flag); } else { nxtCvImage = tempQ.front(); tempQ.pop(); } roctxRangePop(); assert(number_of_colors == nxtCvImage.channels()); cudaPyramidManager.newNxtPyramid(nxtCvImage); } void CudaClass::makeUndPyramidFromDef() { cudaPyramidManager.makeUndPyramidFromDef(); } void CudaClass::makeDefPyramidFromNxt() { cudaPyramidManager.makeDefPyramidFromNxt(); } void CudaClass::updatePolygon( int iSector, deformationDescriptionEnum deformationDescription) { cudaPyramidManager.updatePolygon(iSector, deformationDescription); } errorEnum CudaClass::resetPolygon(int iSector, int x0, int y0, int x1, 
int y1) { errorEnum corrError; cudaPyramidManager.resetPolygon(iSector, x0, y0, x1, y1, fittingModel); corrError = cudaSolverManager.setCuSolver(iSector, number_of_model_parameters, correlationStream); return corrError; } errorEnum CudaClass::resetPolygon(int iSector, float r, float dr, float a, float da, float cx, float cy, int as) { errorEnum corrError; cudaPyramidManager.resetPolygon(iSector, r, dr, a, da, cx, cy, as, fittingModel); corrError = cudaSolverManager.setCuSolver(iSector, number_of_model_parameters, correlationStream); return corrError; } errorEnum CudaClass::resetPolygon(v_points blobContour) { errorEnum corrError; int iSector = 0; cudaPyramidManager.resetPolygon(blobContour, fittingModel); corrError = cudaSolverManager.setCuSolver(iSector, number_of_model_parameters, correlationStream); return corrError; } v_points CudaClass::getUndXY0ToCPU(int iSector) { return cudaPyramidManager.getUndXY0ToCPU(iSector); } v_points CudaClass::getDefXY0ToCPU(int iSector) { return cudaPyramidManager.getDefXY0ToCPU(iSector); }
5f17ffabac94cae8897d161ceb1fd94692f0b0bf.cu
/**************************************************************************** ** ** This software was developed by Javier Gonzalez on Feb 2018 ** ** This class controls a set of GPUs to perform digital image correlation ** ****************************************************************************/ #include "cuda_class.cuh" CudaClass::CudaClass() { #if DEBUG_CUDA printf("CudaClass::CudaClass\n"); #endif cudaMallocHost((void **)&pinnedChi, sizeof(float)); cudaMallocHost((void **)&lastGoodChi, sizeof(float)); } CudaClass::~CudaClass() { #if DEBUG_CUDA printf("CudaClass::~CudaClass\n"); #endif if (correlationStream) { cudaError_t err = cudaSuccess; err = cudaStreamDestroy(correlationStream); if (err != cudaSuccess) { printf("Failed to Destroy correlationStream (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } cudaFreeHost(pinnedChi); cudaFreeHost(lastGoodChi); } int CudaClass::initialize() { // This method returns the number of devices to MainApp, and MainApp // will disable the GPU mode if the number of devices is 0. If the GPU // mode is disabled, there will be no more calls to this class. // I decided not to include an if statement in every method of this // class to check on this. 
if (initialize_devices) { // Initialize devices only once initialize_devices = false; // Calls every GPU to reduce initial latency ( control when it happens ) cudaError_t err = cudaGetDeviceCount(&deviceCount); if (err != cudaSuccess) { printf("Failed to count GPUs (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } #if _WINDOWS deviceCount = 1; printf("CudaClass::initialize Using only %d GPU on WINDOWS compile\n", deviceCount); #endif devicesAvailable = deviceCount; err = cudaStreamCreate(&correlationStream); if (err != cudaSuccess) { printf("Failed to create correlationStream (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } return devicesAvailable; } void CudaClass::set_deviceCount(int deviceCount_in) { deviceCount = deviceCount_in; #if DEBUG_CUDA printf("CUDA: set_deviceCount: Using %d devices\n", deviceCount); #endif } void CudaClass::set_fitting_model(fittingModelEnum fittingModel_in) { fittingModel = fittingModel_in; number_of_model_parameters = ModelClass::get_number_of_model_parameters(fittingModel); } void CudaClass::set_interpolation_model( interpolationModelEnum interpolationModel_in) { interpolationModel = interpolationModel_in; } void CudaClass::set_max_iters(int maximum_iterations_) { maximum_iterations = maximum_iterations_; } void CudaClass::set_precision(float required_precision_) { required_precision = required_precision_; } CorrelationResult *CudaClass::correlate(int iSector, float *initial_guess_, frame_results &results) { #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate\n"); printf("CudaClass::correlate: Initial_guess:\n"); int p = ModelClass::get_number_of_model_parameters(fittingModel); for (int p = 0; p < number_of_model_parameters; ++p) { printf("%14.5e\t", initial_guess_[p]); } printf("\n"); #endif errorEnum errorCode = error_none; float min_lambda = 1e-9f; float max_lambda = 1e9f; int totalIterations = 0; // Upload initialGuess for level0 to the GPU via the pyramid/polygon object // 
initialGuess -> lastGoodParameters , tentativeParameters , savedParameters cudaPyramidManager.initializeParametersLevel0(iSector, initial_guess_); int pyramid_start = cudaPyramidManager.getPyramidStart(); int pyramid_step = cudaPyramidManager.getPyramidStep(); int pyramid_stop = cudaPyramidManager.getPyramidStop(); for (int pyramidLevel = pyramid_stop; pyramidLevel >= pyramid_start; pyramidLevel -= pyramid_step) { // Put a marker on the nvvp CUDA profiler nvtxRangePushA("CudaClass::correlate level"); float lambda = 0.0001f; // Scale lastGood for this pyramid level. tentative and savedParameters are // copied from lastGood cudaPyramidManager.scaleParametersForLevel(iSector, pyramidLevel); //########################################################################################### // // Find initial chi from the initial guess. Make that the lastGoodChi and // start the iteration // // lastGoodParameters -> NR -> tentativeParameters AND lastGoodChi = chi( // lastGoodParameters ) findNewParameters(iSector, pyramidLevel, parType_lastGood, parType_saved, lastGoodChi, lambda); #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate Initial chi is %f\n", *lastGoodChi); #endif bool useSavedParameters = true; //########################################################################################### // // Iteration to find correlation coefficients // for (int iteration = 1; iteration <= maximum_iterations + 1; ++iteration) { #if DEBUG_NEWTON_RAPHSON_CUDA printf("\n\nCudaClass::correlate Starting iteration %d, pyramid level " "%d, sector = %d\n", iteration, pyramidLevel, iSector); #endif if (iteration > maximum_iterations || lambda >= max_lambda) { errorCode = error_correlation_max_iters_reached; break; } else { totalIterations++; } //########################################################################################### // // Find tentative parameter set - We are saving parameters from the last // chi computation that we // use if the corresponding chi was better 
than the "lastGoodChi". // This happens most iterations and saves time. However, if the // corresponding chi is // worst, then we recompute the parameters. // if (useSavedParameters) { #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate Using saved correlation coefficients\n"); #endif cudaPyramidManager.transferParameters(iSector, parType_saved, parType_tentative); } else { #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate Finding next correlation coefficients the " "hard way\n"); #endif // Recompute tentativeParameters from the lastGoodParameters using a // larger lambda findNewParameters(iSector, pyramidLevel, parType_lastGood, parType_tentative, pinnedChi, lambda); } // else - finding new parameter set //########################################################################################### // // Computation of CHI FOR THE TENTATIVE set of model parameters // Compute chi associated with the new tentativeParameters findNewParameters(iSector, pyramidLevel, parType_tentative, parType_saved, pinnedChi, lambda); #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate New chi is %f\n", pinnedChi); #endif // Compares delta chi based on lastGoodChi(last_good_model_parameters) // and // chi(model_parameters). 
However, it does not act on this info until // the // parameters are updated // Make sure the last chi is already available cudaStreamSynchronize(correlationStream); float delta_chi = std::abs((*lastGoodChi - *pinnedChi) / (fmaxf(*lastGoodChi, *pinnedChi) + required_precision)); #if DEBUG_NEWTON_RAPHSON_CUDA printf("CUDA iteration = %4d: ", iteration); printf("last good chi: %12.4e: ", *lastGoodChi); printf("new chi: %12.4e delta chi: %12.4e lambda: %12.4e " "required_precission = %f\n", *pinnedChi, delta_chi, lambda, required_precision); #endif if (*pinnedChi <= *lastGoodChi) // converging step - record the new "best" parameters { *lastGoodChi = *pinnedChi; lambda = fmaxf(lambda * 0.4f, min_lambda); // tentativeParameters (last result) -> lastGoodParameters cudaPyramidManager.transferParameters(iSector, parType_tentative, parType_lastGood); useSavedParameters = true; #if DEBUG_NEWTON_RAPHSON_CUDA printf(" # CONVERGING\n"); #endif } else // diverging step - increase lambda and revert to last "good" set // of parameters { lambda = fminf(lambda * 10.0f, max_lambda); useSavedParameters = false; #if DEBUG_NEWTON_RAPHSON_CUDA printf("\n"); #endif } // Was convergence reached? 
if (delta_chi < required_precision) { #if DEBUG_NEWTON_RAPHSON_CUDA printf("CudaClass::correlate Convergence reached in %d iterations at a " "delta_chi = %6f\n", totalIterations, delta_chi); #endif break; } } // iterations loop nvtxRangePop(); } // Bring center, number of points and error status from the GPU to the CPU // with one transfer on host pinned memory CorrelationResult *cpuCorrelationResults = cudaPyramidManager.getCorrelationResultsToCPU(iSector); cpuCorrelationResults->chi = *lastGoodChi; cpuCorrelationResults->iterations = totalIterations; cpuCorrelationResults->errorCode = errorCode; memcpy(initial_guess_, cpuCorrelationResults->resultingParameters, number_of_model_parameters * sizeof(float)); return cpuCorrelationResults; } errorEnum CudaClass::findNewParameters(int iSector, int pyramidLevel, parameterTypeEnum parSrc, parameterTypeEnum parDst, float *chi, float lambda) { #if DEBUG_CUDA printf("CudaClass::findNewParameters: Start\n"); #endif errorEnum error = error_none; error = NewtonRaphsonStep(iSector, pyramidLevel, parSrc, lambda); if (error) return error; // Put a marker on the nvvp CUDA profiler nvtxRangePushA("callCusolver"); error = cudaSolverManager.callCusolver(iSector, chi); if (error) return error_cuSolver; nvtxRangePop(); // Put a marker on the nvvp CUDA profiler nvtxRangePushA("updateParameters"); // Saves the parameter increment plus the tentativeParameters into // tentativeParameters via thrust operations on GPU cudaPyramidManager.updateParameters(iSector, number_of_model_parameters, parSrc, parDst, correlationStream); nvtxRangePop(); return error_none; } errorEnum CudaClass::NewtonRaphsonStep(int iSector, int pyramidLevel, parameterTypeEnum parSrc, float lambda) { int iGPU = 0; errorEnum errorCode = error_none; // for ( int iGPU = 0 ; iGPU < deviceCount ; ++iGPU ) //{ cudaError_t err = cudaSetDevice(iGPU); if (err != cudaSuccess) { printf("Failed to set device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } 
cudaTextureObject_t undTexture = cudaPyramidManager.getUndTexture(pyramidLevel); cudaTextureObject_t defTexture = cudaPyramidManager.getDefTexture(pyramidLevel); int numberOfPoints = cudaPyramidManager.getNumberOfPoints(iSector, pyramidLevel); float scaling = 1.f / ((float)numberOfPoints); float *undX_ptr = cudaPyramidManager.getUndXPtr(iSector, pyramidLevel); float *undY_ptr = cudaPyramidManager.getUndYPtr(iSector, pyramidLevel); float *undCenter = cudaPyramidManager.getUndCenter(iSector, pyramidLevel); float *globalABChi = cudaPyramidManager.getGlobalABChi(iSector); float *parameters = cudaPyramidManager.getParameters(iSector, parSrc); int blocksPerGrid = (numberOfPoints + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int sharedMemorySize = sizeof(float) * (1 + number_of_model_parameters * (1 + number_of_model_parameters)) * THREADS_PER_BLOCK; #if DEBUG_CUDA_POLYGON printf("\nCudaClass::NewtonRaphsonStep Model numberOfPoints = %d , isector = " "%d , pyramidLevel = %d\n", numberOfPoints, iSector, pyramidLevel); printf("CudaClass::NewtonRaphsonStep parameters used\n"); float *h_par = new float[number_of_model_parameters]; cudaMemcpy(h_par, parameters, number_of_model_parameters * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < number_of_model_parameters; ++i) { printf("%14.4e", h_par[i]); } printf("\n"); printf("CudaClass::NewtonRaphsonStep center used\n"); cudaMemcpy(h_par, undCenter, 2 * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < 2; ++i) { printf("%14.4e", h_par[i]); } printf("\n"); fflush(stdout); delete[] h_par; #endif kCorrelation<<<blocksPerGrid, THREADS_PER_BLOCK, sharedMemorySize, correlationStream>>>(parameters, fittingModel, interpolationModel, number_of_colors, undTexture, defTexture, numberOfPoints, undX_ptr, undY_ptr, undCenter, globalABChi); #if DEBUG_CUDA printf("CudaClass::NewtonRaphsonStep: kCorrelation kernel launched with %d " "blocks of %d threads with %zd bytes of shared memory per block\n", blocksPerGrid, 
THREADS_PER_BLOCK, sharedMemorySize); #endif err = cudaGetLastError(); if (err != cudaSuccess) { printf("Failed to launch correlation kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //} // // for ( int iGPU = 0 ; iGPU < deviceCount ; ++iGPU ) //{ #if DEBUG_CUDA printf("\nCudaClass::NewtonRaphsonStep Aggregation: numberOfPoints = %d in " "GPU %d \n", numberOfPoints, iGPU); #endif err = cudaSetDevice(iGPU); if (err != cudaSuccess) { printf("Failed to set device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //------------------------------------------------------------------------------------------- // // Launch second kernel many times to perform global aggregation of block // results // into globalABChi, layer by layer // //------------------------------------------------------------------------------------------- while (blocksPerGrid > 1) { int reducerBlocksPerGrid = (blocksPerGrid + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; // Every call reduces the number of ABchi by a factor of 256 ( // THREADS_PER_BLOCK ) k_global_reduction<<<reducerBlocksPerGrid, THREADS_PER_BLOCK, sharedMemorySize, correlationStream>>>( blocksPerGrid, // s, globalABChi, number_of_model_parameters); err = cudaGetLastError(); if (err != cudaSuccess) { printf("Failed to launch global aggregation kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } blocksPerGrid = reducerBlocksPerGrid; } // Build LS problem in GPU0 k_build_LS_problem_in_GPU0<<<1, 1, 0, correlationStream>>>( globalABChi, number_of_model_parameters, scaling, lambda); //} //loop iGPU return errorCode; } // NewtonRaphsonStep void CudaClass::resetImagePyramids(const std::string undPath, const std::string defPath, const std::string nxtPath, colorEnum color_mode, const int start, const int step, const int stop) { cv::ImreadModes color_flag; switch (color_mode) { case color_monochrome: color_flag = cv::IMREAD_GRAYSCALE; break; case color_color: color_flag = 
cv::IMREAD_ANYCOLOR; break; default: assert(false); break; } // Put a marker on the nvvp CUDA profiler nvtxRangePushA("CudaClass::resetImagePyramids read und image"); cv::Mat undCvImage = cv::imread(undPath, color_flag); nvtxRangePop(); // Put a marker on the nvvp CUDA profiler nvtxRangePushA("CudaClass::resetImagePyramids read def image"); cv::Mat defCvImage = cv::imread(defPath, color_flag); nvtxRangePop(); cv::Mat nxtCvImage; if (!nxtPath.empty()) { // Put a marker on the nvvp CUDA profiler nvtxRangePushA("CudaClass::resetImagePyramids read nxt image"); nxtCvImage = cv::imread(nxtPath, color_flag); nvtxRangePop(); } number_of_colors = undCvImage.channels(); assert(number_of_colors == defCvImage.channels()); cudaPyramidManager.resetImagePyramids(undCvImage, defCvImage, nxtCvImage, start, step, stop); } void CudaClass::resetNextPyramid(const std::string nxtPath) { #if DEBUG_CUDA_PYRAMID printf("CudaClass::resetNextPyramid with %s\n", nxtPath.c_str()); #endif // Put a marker on the nvvp CUDA profiler nvtxRangePushA("CudaClass::resetNextPyramid read image"); cv::Mat nxtCvImage; if (tempQ.empty()) { cv::ImreadModes color_flag; switch (number_of_colors) { case 1: color_flag = cv::IMREAD_GRAYSCALE; break; case 3: color_flag = cv::IMREAD_ANYCOLOR; break; default: assert(false); break; } nxtCvImage = cv::imread(nxtPath, color_flag); } else { nxtCvImage = tempQ.front(); tempQ.pop(); } nvtxRangePop(); assert(number_of_colors == nxtCvImage.channels()); cudaPyramidManager.newNxtPyramid(nxtCvImage); } void CudaClass::makeUndPyramidFromDef() { cudaPyramidManager.makeUndPyramidFromDef(); } void CudaClass::makeDefPyramidFromNxt() { cudaPyramidManager.makeDefPyramidFromNxt(); } void CudaClass::updatePolygon( int iSector, deformationDescriptionEnum deformationDescription) { cudaPyramidManager.updatePolygon(iSector, deformationDescription); } errorEnum CudaClass::resetPolygon(int iSector, int x0, int y0, int x1, int y1) { errorEnum corrError; 
cudaPyramidManager.resetPolygon(iSector, x0, y0, x1, y1, fittingModel); corrError = cudaSolverManager.setCuSolver(iSector, number_of_model_parameters, correlationStream); return corrError; } errorEnum CudaClass::resetPolygon(int iSector, float r, float dr, float a, float da, float cx, float cy, int as) { errorEnum corrError; cudaPyramidManager.resetPolygon(iSector, r, dr, a, da, cx, cy, as, fittingModel); corrError = cudaSolverManager.setCuSolver(iSector, number_of_model_parameters, correlationStream); return corrError; } errorEnum CudaClass::resetPolygon(v_points blobContour) { errorEnum corrError; int iSector = 0; cudaPyramidManager.resetPolygon(blobContour, fittingModel); corrError = cudaSolverManager.setCuSolver(iSector, number_of_model_parameters, correlationStream); return corrError; } v_points CudaClass::getUndXY0ToCPU(int iSector) { return cudaPyramidManager.getUndXY0ToCPU(iSector); } v_points CudaClass::getDefXY0ToCPU(int iSector) { return cudaPyramidManager.getDefXY0ToCPU(iSector); }
d140397f9130993eccb62eb511293b5893dffdff.hip
// !!! This is a file automatically generated by hipify!!! // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_unroll_width.cuinl" template void megdnn::cuda::conv_bias_int8:: do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_unroll_width< PerChannelBiasVisitor, IConvEpilogue<Activation< megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue< Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue, const ConvParam& param, float alpha, float beta, hipStream_t stream);
d140397f9130993eccb62eb511293b5893dffdff.cu
// generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_unroll_width.cuinl" template void megdnn::cuda::conv_bias_int8:: do_conv_bias_int8_implicit_gemm_imma32x8x16_cdiv4hwn4_unroll_width< PerChannelBiasVisitor, IConvEpilogue<Activation< megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue< Activation<megdnn::param_enumv::ConvBias::NonlineMode::H_SWISH>> epilogue, const ConvParam& param, float alpha, float beta, cudaStream_t stream);
34c71e6ef90133a79f5923e16a8a6589e6776cf6.hip
// !!! This is a file automatically generated by hipify!!! #include "CudaFlow.h" int CudaFlow::copyMaskToDevice(cv::Mat mask0, cv::Mat mask1) { checkCudaErrors(hipMemcpy(d_mask0, (float *)mask0.ptr(), dataSize32f, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_mask1, (float *)mask1.ptr(), dataSize32f, hipMemcpyHostToDevice)); return 0; } int CudaFlow::solveInpainting(float flowScale) { return _solveInpainting(flowScale); } int CudaFlow::_solveInpainting(float flowScale) { // Convert RGB to Gray if (inputType == CV_8UC3) { rgbToGray(d_i08uc3, pI0[0], width, height, stride); rgbToGray(d_i18uc3, pI1[0], width, height, stride); } else if ((inputType == CV_8U) || (inputType == CV_8UC1)) { Cv8uToGray(d_i08u, pI0[0], width, height, stride); Cv8uToGray(d_i18u, pI1[0], width, height, stride); } else if (inputType == CV_16U) { Cv16uToGray(d_i016u, pI0[0], width, height, stride); Cv16uToGray(d_i116u, pI1[0], width, height, stride); } else if (inputType == CV_32F) { Cv32fToGray(d_i032f, pI0[0], width, height, stride); Cv32fToGray(d_i132f, pI1[0], width, height, stride); } else { rgbToGray(d_i08uc3, pI0[0], width, height, stride); rgbToGray(d_i18uc3, pI1[0], width, height, stride); } Cv32fToGray(d_mask0, pMask0[0], width, height, stride); Cv32fToGray(d_mask1, pMask1[0], width, height, stride); // construct pyramid for (int level = 1; level < nLevels; level++) { Downscale(pI0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI0[level]); Downscale(pI1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI1[level]); //masks Downscale(pMask0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pMask0[level]); Downscale(pMask1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pMask1[level]); } // solve flow checkCudaErrors(hipMemset(d_u, 0, dataSize)); checkCudaErrors(hipMemset(d_v, 0, dataSize)); for (int level = nLevels - 
1; level >= 0; level--) { for (int warpIter = 0; warpIter < nWarpIters; warpIter++) { //std::cout << level << std::endl; //initialize zeros checkCudaErrors(hipMemset(d_du, 0, dataSize)); checkCudaErrors(hipMemset(d_dv, 0, dataSize)); checkCudaErrors(hipMemset(d_dus, 0, dataSize)); checkCudaErrors(hipMemset(d_dvs, 0, dataSize)); checkCudaErrors(hipMemset(d_dumed, 0, dataSize)); checkCudaErrors(hipMemset(d_dvmed, 0, dataSize)); checkCudaErrors(hipMemset(d_dumeds, 0, dataSize)); checkCudaErrors(hipMemset(d_dvmeds, 0, dataSize)); checkCudaErrors(hipMemset(d_pu1, 0, dataSize)); checkCudaErrors(hipMemset(d_pu2, 0, dataSize)); checkCudaErrors(hipMemset(d_pv1, 0, dataSize)); checkCudaErrors(hipMemset(d_pv2, 0, dataSize)); //warp frame 1 WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp); //compute derivatives ComputeDerivatives(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz); //inner iteration for (int iter = 0; iter < nSolverIters; ++iter) { SolveDataL1Inpaint(d_dumed, d_dvmed, pMask0[level], pMask1[level], d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, pW[level], pH[level], pS[level], lambda, theta, d_dumeds, d_dvmeds); //du1 = duhat output Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); SolveSmoothDualTVGlobal(d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, pW[level], pH[level], pS[level], tau, theta, d_pu1s, d_pu2s, d_pv1s, d_pv2s); Swap(d_pu1, d_pu1s); Swap(d_pu2, d_pu2s); Swap(d_pv1, d_pv1s); Swap(d_pv2, d_pv2s); } // one median filtering MedianFilter(d_dumed, d_dvmed, pW[level], pH[level], pS[level], d_dumeds, d_dvmeds, 5); Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); // update u, v Add(d_u, d_dumed, pH[level] * pS[level], d_u); Add(d_v, d_dvmed, pH[level] * pS[level], d_v); } //upscale if (level > 0) { // scale uv //float scale = (float)pW[level + 1] / (float)pW[level]; float scale = fScale; Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us); //float scaleY = 
(float)pH[level + 1] / (float)pH[level]; Upscale(d_v, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs); Swap(d_u, d_us); Swap(d_v, d_vs); } } if (withVisualization) { FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale); } //FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale); //SolveSceneFlow(d_u, d_v, d_depth016u, d_depth116u, width, height, stride, d_sceneflow); //std::cout << stride << " " << height << " " << height << " " << inputChannels << std::endl; return 0; }
34c71e6ef90133a79f5923e16a8a6589e6776cf6.cu
#include "CudaFlow.h" int CudaFlow::copyMaskToDevice(cv::Mat mask0, cv::Mat mask1) { checkCudaErrors(cudaMemcpy(d_mask0, (float *)mask0.ptr(), dataSize32f, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_mask1, (float *)mask1.ptr(), dataSize32f, cudaMemcpyHostToDevice)); return 0; } int CudaFlow::solveInpainting(float flowScale) { return _solveInpainting(flowScale); } int CudaFlow::_solveInpainting(float flowScale) { // Convert RGB to Gray if (inputType == CV_8UC3) { rgbToGray(d_i08uc3, pI0[0], width, height, stride); rgbToGray(d_i18uc3, pI1[0], width, height, stride); } else if ((inputType == CV_8U) || (inputType == CV_8UC1)) { Cv8uToGray(d_i08u, pI0[0], width, height, stride); Cv8uToGray(d_i18u, pI1[0], width, height, stride); } else if (inputType == CV_16U) { Cv16uToGray(d_i016u, pI0[0], width, height, stride); Cv16uToGray(d_i116u, pI1[0], width, height, stride); } else if (inputType == CV_32F) { Cv32fToGray(d_i032f, pI0[0], width, height, stride); Cv32fToGray(d_i132f, pI1[0], width, height, stride); } else { rgbToGray(d_i08uc3, pI0[0], width, height, stride); rgbToGray(d_i18uc3, pI1[0], width, height, stride); } Cv32fToGray(d_mask0, pMask0[0], width, height, stride); Cv32fToGray(d_mask1, pMask1[0], width, height, stride); // construct pyramid for (int level = 1; level < nLevels; level++) { Downscale(pI0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI0[level]); Downscale(pI1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI1[level]); //masks Downscale(pMask0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pMask0[level]); Downscale(pMask1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pMask1[level]); } // solve flow checkCudaErrors(cudaMemset(d_u, 0, dataSize)); checkCudaErrors(cudaMemset(d_v, 0, dataSize)); for (int level = nLevels - 1; level >= 0; level--) { for (int warpIter = 0; 
warpIter < nWarpIters; warpIter++) { //std::cout << level << std::endl; //initialize zeros checkCudaErrors(cudaMemset(d_du, 0, dataSize)); checkCudaErrors(cudaMemset(d_dv, 0, dataSize)); checkCudaErrors(cudaMemset(d_dus, 0, dataSize)); checkCudaErrors(cudaMemset(d_dvs, 0, dataSize)); checkCudaErrors(cudaMemset(d_dumed, 0, dataSize)); checkCudaErrors(cudaMemset(d_dvmed, 0, dataSize)); checkCudaErrors(cudaMemset(d_dumeds, 0, dataSize)); checkCudaErrors(cudaMemset(d_dvmeds, 0, dataSize)); checkCudaErrors(cudaMemset(d_pu1, 0, dataSize)); checkCudaErrors(cudaMemset(d_pu2, 0, dataSize)); checkCudaErrors(cudaMemset(d_pv1, 0, dataSize)); checkCudaErrors(cudaMemset(d_pv2, 0, dataSize)); //warp frame 1 WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_v, d_i1warp); //compute derivatives ComputeDerivatives(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz); //inner iteration for (int iter = 0; iter < nSolverIters; ++iter) { SolveDataL1Inpaint(d_dumed, d_dvmed, pMask0[level], pMask1[level], d_pu1, d_pu2, d_pv1, d_pv2, d_Ix, d_Iy, d_Iz, pW[level], pH[level], pS[level], lambda, theta, d_dumeds, d_dvmeds); //du1 = duhat output Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); SolveSmoothDualTVGlobal(d_dumed, d_dvmed, d_pu1, d_pu2, d_pv1, d_pv2, pW[level], pH[level], pS[level], tau, theta, d_pu1s, d_pu2s, d_pv1s, d_pv2s); Swap(d_pu1, d_pu1s); Swap(d_pu2, d_pu2s); Swap(d_pv1, d_pv1s); Swap(d_pv2, d_pv2s); } // one median filtering MedianFilter(d_dumed, d_dvmed, pW[level], pH[level], pS[level], d_dumeds, d_dvmeds, 5); Swap(d_dumed, d_dumeds); Swap(d_dvmed, d_dvmeds); // update u, v Add(d_u, d_dumed, pH[level] * pS[level], d_u); Add(d_v, d_dvmed, pH[level] * pS[level], d_v); } //upscale if (level > 0) { // scale uv //float scale = (float)pW[level + 1] / (float)pW[level]; float scale = fScale; Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us); //float scaleY = (float)pH[level + 1] / 
(float)pH[level]; Upscale(d_v, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_vs); Swap(d_u, d_us); Swap(d_v, d_vs); } } if (withVisualization) { FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale); } //FlowToHSV(d_u, d_v, width, height, stride, d_uvrgb, flowScale); //SolveSceneFlow(d_u, d_v, d_depth016u, d_depth116u, width, height, stride, d_sceneflow); //std::cout << stride << " " << height << " " << height << " " << inputChannels << std::endl; return 0; }
7a18a51b9f336fe97d439504536a38f94290b1da.hip
// !!! This is a file automatically generated by hipify!!! #include "graph.hpp" void taskflow(const Graph& g, unsigned num_cpus, unsigned num_gpus) { const int N = 1000; std::atomic<int> counter{0}; int* cx = new int[N]; int* cy = new int[N]; int* cz = new int[N]; int* gx = nullptr; int* gy = nullptr; int* gz = nullptr; TF_CHECK_CUDA(hipMallocManaged(&gx, N*sizeof(int)), "failed at hipMalloc"); TF_CHECK_CUDA(hipMallocManaged(&gy, N*sizeof(int)), "failed at hipMalloc"); TF_CHECK_CUDA(hipMallocManaged(&gz, N*sizeof(int)), "failed at hipMalloc"); tf::Taskflow taskflow; tf::Executor executor(num_cpus, num_gpus); std::vector<tf::Task> tasks(g.num_nodes); // create a task for each node for(const auto& v : g.nodes) { // cpu task if(v.g == -1) { tasks[v.v] = taskflow.emplace([&](){ ++counter; for(int i=0; i<N; ++i) { cz[i] = cx[i] + cy[i]; } }); } else { tasks[v.v] = taskflow.emplace([&, d=v.g](tf::cudaFlow& cf){ ++counter; cf.device(d); auto sgx = cf.zero(gx, N); auto sgy = cf.zero(gy, N); auto sgz = cf.zero(gz, N); auto h2d_gx = cf.copy(gx, cx, N); auto h2d_gy = cf.copy(gy, cy, N); auto h2d_gz = cf.copy(gz, cz, N); auto kernel = cf.kernel((N+255)/256, 256, 0, add<int>, gx, gy, gz, N); auto d2h_gx = cf.copy(cx, gx, N); auto d2h_gy = cf.copy(cy, gy, N); auto d2h_gz = cf.copy(cz, gz, N); sgx.precede(h2d_gx); sgy.precede(h2d_gy); sgz.precede(h2d_gz); kernel.succeed(h2d_gx, h2d_gy, h2d_gz) .precede(d2h_gx, d2h_gy, d2h_gz); }); } } for(const auto& e : g.edges) { tasks[e.u].precede(tasks[e.v]); } executor.run(taskflow).wait(); //taskflow.dump(std::cout); delete [] cx; delete [] cy; delete [] cz; TF_CHECK_CUDA(hipFree(gx), "failed at hipFree"); TF_CHECK_CUDA(hipFree(gy), "failed at hipFree"); TF_CHECK_CUDA(hipFree(gz), "failed at hipFree"); if(counter != g.num_nodes) { throw std::runtime_error("wrong result"); } } std::chrono::microseconds measure_time_taskflow( const Graph& g, unsigned num_cpus, unsigned num_gpus ) { auto beg = std::chrono::high_resolution_clock::now(); 
taskflow(g, num_cpus, num_gpus); auto end = std::chrono::high_resolution_clock::now(); return std::chrono::duration_cast<std::chrono::microseconds>(end - beg); }
7a18a51b9f336fe97d439504536a38f94290b1da.cu
#include "graph.hpp" void taskflow(const Graph& g, unsigned num_cpus, unsigned num_gpus) { const int N = 1000; std::atomic<int> counter{0}; int* cx = new int[N]; int* cy = new int[N]; int* cz = new int[N]; int* gx = nullptr; int* gy = nullptr; int* gz = nullptr; TF_CHECK_CUDA(cudaMallocManaged(&gx, N*sizeof(int)), "failed at cudaMalloc"); TF_CHECK_CUDA(cudaMallocManaged(&gy, N*sizeof(int)), "failed at cudaMalloc"); TF_CHECK_CUDA(cudaMallocManaged(&gz, N*sizeof(int)), "failed at cudaMalloc"); tf::Taskflow taskflow; tf::Executor executor(num_cpus, num_gpus); std::vector<tf::Task> tasks(g.num_nodes); // create a task for each node for(const auto& v : g.nodes) { // cpu task if(v.g == -1) { tasks[v.v] = taskflow.emplace([&](){ ++counter; for(int i=0; i<N; ++i) { cz[i] = cx[i] + cy[i]; } }); } else { tasks[v.v] = taskflow.emplace([&, d=v.g](tf::cudaFlow& cf){ ++counter; cf.device(d); auto sgx = cf.zero(gx, N); auto sgy = cf.zero(gy, N); auto sgz = cf.zero(gz, N); auto h2d_gx = cf.copy(gx, cx, N); auto h2d_gy = cf.copy(gy, cy, N); auto h2d_gz = cf.copy(gz, cz, N); auto kernel = cf.kernel((N+255)/256, 256, 0, add<int>, gx, gy, gz, N); auto d2h_gx = cf.copy(cx, gx, N); auto d2h_gy = cf.copy(cy, gy, N); auto d2h_gz = cf.copy(cz, gz, N); sgx.precede(h2d_gx); sgy.precede(h2d_gy); sgz.precede(h2d_gz); kernel.succeed(h2d_gx, h2d_gy, h2d_gz) .precede(d2h_gx, d2h_gy, d2h_gz); }); } } for(const auto& e : g.edges) { tasks[e.u].precede(tasks[e.v]); } executor.run(taskflow).wait(); //taskflow.dump(std::cout); delete [] cx; delete [] cy; delete [] cz; TF_CHECK_CUDA(cudaFree(gx), "failed at cudaFree"); TF_CHECK_CUDA(cudaFree(gy), "failed at cudaFree"); TF_CHECK_CUDA(cudaFree(gz), "failed at cudaFree"); if(counter != g.num_nodes) { throw std::runtime_error("wrong result"); } } std::chrono::microseconds measure_time_taskflow( const Graph& g, unsigned num_cpus, unsigned num_gpus ) { auto beg = std::chrono::high_resolution_clock::now(); taskflow(g, num_cpus, num_gpus); auto end = 
std::chrono::high_resolution_clock::now(); return std::chrono::duration_cast<std::chrono::microseconds>(end - beg); }
285e90ceefd8df5a435958084ff10e153962a725.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kunet.h" __global__ void _drop32(int n, float *x, float *xmask, double dropout, double scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { if (xmask[i] < dropout) x[i] = 0; else x[i] *= scale; i += blockDim.x * gridDim.x; } } __global__ void _drop64(int n, double *x, double *xmask, double dropout, double scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { if (xmask[i] < dropout) x[i] = 0; else x[i] *= scale; i += blockDim.x * gridDim.x; } } extern "C" { void drop32(int n, float *x, float *xmask, double dropout, double scale) KCALL(_drop32,n,x,xmask,dropout,scale); void drop64(int n, double *x, double *xmask, double dropout, double scale) KCALL(_drop64,n,x,xmask,dropout,scale); }
285e90ceefd8df5a435958084ff10e153962a725.cu
#include "kunet.h" __global__ void _drop32(int n, float *x, float *xmask, double dropout, double scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { if (xmask[i] < dropout) x[i] = 0; else x[i] *= scale; i += blockDim.x * gridDim.x; } } __global__ void _drop64(int n, double *x, double *xmask, double dropout, double scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; while (i < n) { if (xmask[i] < dropout) x[i] = 0; else x[i] *= scale; i += blockDim.x * gridDim.x; } } extern "C" { void drop32(int n, float *x, float *xmask, double dropout, double scale) KCALL(_drop32,n,x,xmask,dropout,scale); void drop64(int n, double *x, double *xmask, double dropout, double scale) KCALL(_drop64,n,x,xmask,dropout,scale); }
6f155f3d268a4a6d49aa6aac23d12ec08aa1f86b.hip
// !!! This is a file automatically generated by hipify!!! #include "tools.hpp" #include "density_clustering_cuda.hpp" #include "density_clustering_cuda_kernels.hpp" #include "logger.hpp" #include <algorithm> #include <unordered_set> #include <unordered_map> #include <chrono> #include <hip/hip_runtime.h> #include <omp.h> namespace Clustering { namespace Density { namespace CUDA { void check_error(std::string msg) { hipError_t err = hipGetLastError(); if (err != hipSuccess) { std::cerr << "CUDA error: " << msg << "\n" << hipGetErrorString(err) << std::endl; exit(EXIT_FAILURE); } } int get_num_gpus() { int n_gpus; hipGetDeviceCount(&n_gpus); check_error("trying to get number of available GPUs"); if (n_gpus == 0) { std::cerr << "error: no CUDA-compatible GPUs found" << std::endl; exit(EXIT_FAILURE); } else { return n_gpus; } } Pops calculate_populations_per_gpu(const float* coords , std::size_t n_rows , std::size_t n_cols , std::vector<float> radii , std::size_t i_from , std::size_t i_to , int i_gpu) { using Clustering::Tools::min_multiplicator; ASSUME_ALIGNED(coords); unsigned int n_radii = radii.size(); std::vector<float> rad2(n_radii); for (std::size_t i=0; i < n_radii; ++i) { rad2[i] = radii[i]*radii[i]; } // GPU setup hipSetDevice(i_gpu); float* d_coords; float* d_rad2; unsigned int* d_pops; hipMalloc((void**) &d_coords , sizeof(float) * n_rows * n_cols); check_error("pop-calc device mallocs (coords)"); hipMalloc((void**) &d_pops , sizeof(unsigned int) * n_rows * n_radii); check_error("pop-calc device mallocs (pops)"); hipMalloc((void**) &d_rad2 , sizeof(float) * n_radii); check_error("pop-calc device mallocs (rad2)"); hipMemset(d_pops , 0 , sizeof(unsigned int) * n_rows * n_radii); check_error("pop-calc memset"); hipMemcpy(d_coords , coords , sizeof(float) * n_rows * n_cols , hipMemcpyHostToDevice); hipMemcpy(d_rad2 , rad2.data() , sizeof(float) * n_radii , hipMemcpyHostToDevice); check_error("pop-calc mem copies"); int max_shared_mem; 
hipDeviceGetAttribute(&max_shared_mem , hipDeviceAttributeMaxSharedMemoryPerBlock , i_gpu); check_error("getting max shared mem size"); unsigned int block_size = BSIZE_POPS; unsigned int shared_mem = 2 * block_size * n_cols * sizeof(float); if (shared_mem > max_shared_mem) { std::cerr << "error: max. shared mem per block too small on this GPU.\n" << " either reduce BSIZE_POPS or get a better GPU." << std::endl; exit(EXIT_FAILURE); } unsigned int block_rng = min_multiplicator(i_to-i_from, block_size); Clustering::logger(std::cout) << "# blocks needed: " << block_rng << std::endl; for (unsigned int i=0; i*block_size < n_rows; ++i) { hipLaunchKernelGGL(( Clustering::Density::CUDA::Kernel::population_count) , dim3(block_rng) , dim3(block_size) , shared_mem , 0, i*block_size , d_coords , n_rows , n_cols , d_rad2 , n_radii , d_pops , i_from , i_to); } hipDeviceSynchronize(); check_error("after kernel loop"); // get partial results from GPU std::vector<unsigned int> partial_pops(n_rows*n_radii); hipMemcpy(partial_pops.data() , d_pops , sizeof(unsigned int) * n_rows * n_radii , hipMemcpyDeviceToHost); // sort into resulting pops Pops pops; for (unsigned int r=0; r < n_radii; ++r) { pops[radii[r]].resize(n_rows, 0); for (unsigned int i=i_from; i < i_to; ++i) { pops[radii[r]][i] = partial_pops[r*n_rows+i]; } } hipFree(d_coords); hipFree(d_rad2); hipFree(d_pops); return pops; } Pops calculate_populations(const float* coords , const std::size_t n_rows , const std::size_t n_cols , std::vector<float> radii) { using Clustering::Tools::dim1_sorted_coords; using Clustering::Tools::boxlimits; ASSUME_ALIGNED(coords); std::sort(radii.begin(), radii.end(), std::greater<float>()); int n_gpus = get_num_gpus(); int gpu_range = n_rows / n_gpus; int i; std::vector<Pops> partial_pops(n_gpus); #pragma omp parallel for default(none)\ private(i)\ firstprivate(n_gpus,n_rows,n_cols,gpu_range)\ shared(partial_pops,radii,coords)\ num_threads(n_gpus)\ schedule(dynamic,1) for (i=0; i < n_gpus; ++i) { 
// compute partial populations in parallel // on all available GPUs partial_pops[i] = calculate_populations_per_gpu(coords , n_rows , n_cols , radii , i*gpu_range , i == (n_gpus-1) ? n_rows : (i+1)*gpu_range , i); } Pops pops; // combine pops for (float r: radii) { pops[r].resize(n_rows, 0); for (i=0; i < n_rows; ++i) { for (unsigned int i_gpu=0; i_gpu < n_gpus; ++i_gpu) { pops[r][i] += partial_pops[i_gpu][r][i]; } } } return pops; } std::tuple<Neighborhood, Neighborhood> nearest_neighbors_per_gpu(const float* coords , const std::size_t n_rows , const std::size_t n_cols , const std::vector<float>& free_energy , std::size_t i_from , std::size_t i_to , int i_gpu) { using Clustering::Tools::min_multiplicator; ASSUME_ALIGNED(coords); // GPU setup hipSetDevice(i_gpu); float* d_coords; float* d_fe; unsigned int* d_nh_nhhd_ndx; float* d_nh_nhhd_dist; // allocate memory hipMalloc((void**) &d_coords , sizeof(float) * n_rows * n_cols); hipMalloc((void**) &d_fe , sizeof(float) * n_rows); hipMalloc((void**) &d_nh_nhhd_ndx , sizeof(unsigned int) * n_rows * 2); hipMalloc((void**) &d_nh_nhhd_dist , sizeof(float) * n_rows * 2); // initialize all min dists and indices to zero hipMemset(d_nh_nhhd_ndx , 0 , sizeof(unsigned int) * n_rows * 2); hipMemset(d_nh_nhhd_dist , 0 , sizeof(float) * n_rows * 2); // copy coordinates and free energies to GPU hipMemcpy(d_coords , coords , sizeof(float) * n_rows * n_cols , hipMemcpyHostToDevice); hipMemcpy(d_fe , free_energy.data() , sizeof(float) * n_rows , hipMemcpyHostToDevice); int max_shared_mem; hipDeviceGetAttribute(&max_shared_mem , hipDeviceAttributeMaxSharedMemoryPerBlock , i_gpu); check_error("retrieving max shared mem"); unsigned int block_size = BSIZE_NH; // compute necessary size of shared memory of block for // coordinates (2*n_cols) and free energies (1 col) unsigned int shared_mem = (2*n_cols + 1) * block_size * sizeof(float); if (shared_mem > max_shared_mem) { std::cerr << "error: max. 
shared mem per block too small on this GPU.\n" << " either reduce block_size for NN search or get a " << "better GPU." << std::endl; exit(EXIT_FAILURE); } unsigned int block_rng = min_multiplicator(i_to-i_from, block_size); for (unsigned int i=0; i*block_size < n_rows; ++i) { hipLaunchKernelGGL(( Clustering::Density::CUDA::Kernel::nearest_neighbor_search) , dim3(block_rng) , dim3(block_size) , shared_mem , 0, i*block_size , d_coords , n_rows , n_cols , d_fe , d_nh_nhhd_ndx , d_nh_nhhd_dist , i_from , i_to); } hipDeviceSynchronize(); check_error("after kernel loop"); // initialize neighborhoods (on host) Neighborhood nh; Neighborhood nhhd; // collect results from GPU std::vector<unsigned int> buf_ndx(n_rows * 2); std::vector<float> buf_dist(n_rows * 2); hipMemcpy(buf_ndx.data() , d_nh_nhhd_ndx , sizeof(unsigned int) * n_rows * 2 , hipMemcpyDeviceToHost); hipMemcpy(buf_dist.data() , d_nh_nhhd_dist , sizeof(float) * n_rows * 2 , hipMemcpyDeviceToHost); for (unsigned int i=0; i < n_rows; ++i) { nh[i] = {buf_ndx[i] , buf_dist[i]}; nhhd[i] = {buf_ndx[n_rows+i] , buf_dist[n_rows+i]}; } // device cleanup hipFree(d_coords); hipFree(d_fe); hipFree(d_nh_nhhd_ndx); hipFree(d_nh_nhhd_dist); // results return std::make_tuple(nh, nhhd); } std::tuple<Neighborhood, Neighborhood> nearest_neighbors(const float* coords , const std::size_t n_rows , const std::size_t n_cols , const std::vector<float>& free_energy) { int n_gpus = get_num_gpus(); std::vector<std::tuple<Neighborhood, Neighborhood>> partials(n_gpus); unsigned int gpu_range = n_rows / n_gpus; unsigned int i_gpu; #pragma omp parallel for default(none)\ private(i_gpu)\ firstprivate(n_gpus,n_rows,n_cols,gpu_range)\ shared(partials,coords,free_energy)\ num_threads(n_gpus) for (i_gpu=0; i_gpu < n_gpus; ++i_gpu) { partials[i_gpu] = nearest_neighbors_per_gpu(coords , n_rows , n_cols , free_energy , i_gpu*gpu_range , (i_gpu == (n_gpus-1)) ? 
n_rows : (i_gpu+1)*gpu_range , i_gpu); } // combine partial neighborhood results from gpus Neighborhood nh; Neighborhood nhhd; std::tie(nh, nhhd) = partials[0]; for (i_gpu=1; i_gpu < n_gpus; ++i_gpu) { Neighborhood partial_nh; Neighborhood partial_nhhd; std::tie(partial_nh, partial_nhhd) = partials[i_gpu]; for (unsigned int i=0; i < n_rows; ++i) { if ((partial_nh[i].second != 0) || (partial_nhhd[i].second != 0)) { nh[i] = partial_nh[i]; nhhd[i] = partial_nhhd[i]; } } } return std::make_tuple(nh, nhhd); } std::vector<unsigned int> clustering_rebased(std::vector<unsigned int> clustering) { std::map<unsigned int, unsigned int> dict; // construct dictionary dict[0] = 0; for (unsigned int i=0; i < clustering.size(); ++i) { unsigned int s = clustering[i]; if (dict.count(s) == 0) { dict[s] = i+1; } } // rebase for (unsigned int& s: clustering) { s = dict[s]; } return clustering; } std::vector<unsigned int> merge_results(std::vector<std::vector<unsigned int>> clusterings , unsigned int max_row) { unsigned int n_results = clusterings.size(); if (n_results == 0) { std::cerr << "error: there are no partial clustering results to merge!" << std::endl; exit(EXIT_FAILURE); } else { if (clusterings[0].size() == 0) { std::cerr << "error: no sampling, nothing to merge" << std::endl; exit(EXIT_FAILURE); } } max_row = ::min(max_row , (unsigned int) clusterings[0].size()); for (unsigned int i=0; i < max_row; ++i) { // collect start points, i.e. 
cluster assignemnts // of all partial results std::set<unsigned int> start_points; for (unsigned int j=0; j < n_results; ++j) { start_points.insert(clusterings[j][i]); } // not interested in zero-ed states (aka no assignment) if (start_points.count(0) > 0) { start_points.erase(0); } // follow start_points (id = i_state-1), collect all // states and rebase them to min(state) std::set<unsigned int> need_update = start_points; for (unsigned int s: start_points) { unsigned int s_old = 1; while (s != 0 && s_old != s) { s_old = s; s = clusterings[0][s-1]; need_update.insert(s); } } // std::set is guaranteed to be ordered! unsigned int min_s = (*need_update.begin()); for (unsigned int s: need_update) { clusterings[0][s-1] = min_s; } } return clusterings[0]; } std::vector<std::size_t> screening(const std::vector<float>& free_energy , const Neighborhood& nh , const float free_energy_threshold , const float* coords , const std::size_t n_rows , const std::size_t n_cols , const std::vector<std::size_t> initial_clusters) { using Clustering::Tools::min_multiplicator; // data preparation std::size_t first_frame_above_threshold; double sigma2; std::vector<FreeEnergy> fe_sorted; std::vector<std::size_t> prev_clustering; unsigned int prev_max_state; std::tie(prev_clustering , first_frame_above_threshold , sigma2 , fe_sorted , std::ignore , prev_max_state) = prepare_initial_clustering(free_energy , nh , free_energy_threshold , n_rows , initial_clusters); // measure runtime & give some informative output std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now(); Clustering::logger(std::cout) << "FE: " << free_energy_threshold << " frames: " << first_frame_above_threshold; float max_dist2 = 4*sigma2; // prepare CUDA environment int n_gpus = get_num_gpus(); std::vector<float*> d_coords_sorted(n_gpus); std::vector<unsigned int*> d_clustering(n_gpus); // sort coords (and previous clustering results) // according to free energies std::vector<float> 
tmp_coords_sorted(n_rows * n_cols); std::vector<unsigned int> clustering_sorted(n_rows); for (unsigned int i=0; i < n_rows; ++i) { for (unsigned int j=0; j < n_cols; ++j) { tmp_coords_sorted[i*n_cols+j] = coords[fe_sorted[i].first*n_cols+j]; } // intialize energy-sorted clustering results if (i < first_frame_above_threshold) { clustering_sorted[i] = prev_clustering[fe_sorted[i].first]; } } // initialize new (unclustered) frames unsigned int prev_last_frame = std::distance(clustering_sorted.begin() , std::find(clustering_sorted.begin() , clustering_sorted.end() , 0)); for (unsigned int i=prev_last_frame; i < first_frame_above_threshold; ++i) { clustering_sorted[i] = ++prev_max_state; } // computational range of single gpu unsigned int gpu_rng = min_multiplicator(first_frame_above_threshold - prev_last_frame , n_gpus); if (gpu_rng == 0) { // nothing to do, since all frames below threshold were already // below previous threshold return initial_clusters; } int max_shared_mem; // assuming GPUs are of same type with same amount of memory hipDeviceGetAttribute(&max_shared_mem , hipDeviceAttributeMaxSharedMemoryPerBlock , 0); check_error("getting max shared mem size"); unsigned int shared_mem = 2 * BSIZE_SCR * n_cols * sizeof(float); //TODO!!!!!!!!! 
check shared_mem + const(shared_mem) < max_shared_mem unsigned int block_rng, i_from, i_to, i, i_gpu; // initialize GPUs for (i_gpu=0; i_gpu < n_gpus; ++i_gpu) { hipSetDevice(i_gpu); // allocate memory on GPUs hipMalloc((void**) &d_coords_sorted[i_gpu] , sizeof(float) * n_rows * n_cols); check_error("after malloc"); hipMalloc((void**) &d_clustering[i_gpu] , sizeof(unsigned int) * n_rows); check_error("after malloc"); // copy sorted coords and previous clustering results to GPUs hipMemcpy(d_coords_sorted[i_gpu] , tmp_coords_sorted.data() , sizeof(float) * n_rows * n_cols , hipMemcpyHostToDevice); check_error("after memcopy of sorted coords"); } // change state names in clustering to conform to lowest indices clustering_sorted = clustering_rebased(clustering_sorted); // fill zero-set indices with their own index for (unsigned int i=0; i < first_frame_above_threshold; ++i) { if (clustering_sorted[i] == 0) { clustering_sorted[i] = i+1; } } std::vector<unsigned int> clustering_sorted_old; while (clustering_sorted_old != clustering_sorted) { // cache old results clustering_sorted_old = clustering_sorted; // (re-)cluster #pragma omp parallel for\ default(none)\ private(i,i_gpu,block_rng,i_from,i_to)\ firstprivate(n_gpus,n_rows,n_cols,gpu_rng,max_dist2,\ prev_last_frame,\ shared_mem,first_frame_above_threshold)\ shared(d_coords_sorted,d_clustering,\ tmp_coords_sorted,clustering_sorted)\ num_threads(n_gpus) for (i_gpu=0; i_gpu < n_gpus; ++i_gpu) { hipSetDevice(i_gpu); check_error("after setting gpu device"); hipMemcpy(d_clustering[i_gpu] , clustering_sorted.data() , sizeof(unsigned int) * n_rows , hipMemcpyHostToDevice); check_error("after memcopy of prev clustering"); i_from = prev_last_frame + i_gpu * gpu_rng; i_to = (i_gpu == (n_gpus-1)) ? 
first_frame_above_threshold : prev_last_frame + (i_gpu+1) * gpu_rng; block_rng = min_multiplicator(i_to-i_from , BSIZE_SCR); for (i=0; i*BSIZE_SCR < first_frame_above_threshold; ++i) { hipLaunchKernelGGL(( Clustering::Density::CUDA::Kernel::screening) , dim3(block_rng) , dim3(BSIZE_SCR) , shared_mem , 0, i*BSIZE_SCR , d_coords_sorted[i_gpu] , ::min(first_frame_above_threshold, n_rows) , n_cols , max_dist2 , d_clustering[i_gpu] , i_from , i_to); } hipDeviceSynchronize(); check_error("after kernel loop"); } // collect & merge clustering results from GPUs std::vector<std::vector<unsigned int>> clstr_results(n_gpus , std::vector<unsigned int>(n_rows)); for (int i_gpu=0; i_gpu < n_gpus; ++i_gpu) { hipMemcpy(clstr_results[i_gpu].data() , d_clustering[i_gpu] , sizeof(unsigned int) * n_rows , hipMemcpyDeviceToHost); } clustering_sorted = merge_results(clstr_results , first_frame_above_threshold); // update references by comparing old to new results std::unordered_map<unsigned int, unsigned int> dict; dict[0] = 0; for (i=0; i < first_frame_above_threshold; ++i) { unsigned int state_old = clustering_sorted_old[i]; unsigned int state_new = clustering_sorted[i]; if (dict.count(state_old) == 0) { dict[state_old] = ::min(state_old, state_new); } else { dict[state_old] = ::min(dict[state_old], state_new); } } for (unsigned int& s: clustering_sorted) { s = dict[s]; } } // end while // cleanup CUDA environment for (int i_gpu=0; i_gpu < n_gpus; ++i_gpu) { hipFree(d_coords_sorted[i_gpu]); hipFree(d_clustering[i_gpu]); } // convert state trajectory from // FE-sorted order to original order std::vector<std::size_t> clustering(n_rows); for (unsigned int i=0; i < n_rows; ++i) { clustering[fe_sorted[i].first] = clustering_sorted[i]; } // final output std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); Clustering::logger(std::cout) << " runtime: " << std::chrono::duration_cast <std::chrono::seconds>(t1-t0).count() << " secs" << std::endl; return 
normalized_cluster_names(first_frame_above_threshold , clustering , fe_sorted); } }}} // end Clustering::Density::CUDA
6f155f3d268a4a6d49aa6aac23d12ec08aa1f86b.cu
#include "tools.hpp" #include "density_clustering_cuda.hpp" #include "density_clustering_cuda_kernels.hpp" #include "logger.hpp" #include <algorithm> #include <unordered_set> #include <unordered_map> #include <chrono> #include <cuda.h> #include <omp.h> namespace Clustering { namespace Density { namespace CUDA { void check_error(std::string msg) { cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { std::cerr << "CUDA error: " << msg << "\n" << cudaGetErrorString(err) << std::endl; exit(EXIT_FAILURE); } } int get_num_gpus() { int n_gpus; cudaGetDeviceCount(&n_gpus); check_error("trying to get number of available GPUs"); if (n_gpus == 0) { std::cerr << "error: no CUDA-compatible GPUs found" << std::endl; exit(EXIT_FAILURE); } else { return n_gpus; } } Pops calculate_populations_per_gpu(const float* coords , std::size_t n_rows , std::size_t n_cols , std::vector<float> radii , std::size_t i_from , std::size_t i_to , int i_gpu) { using Clustering::Tools::min_multiplicator; ASSUME_ALIGNED(coords); unsigned int n_radii = radii.size(); std::vector<float> rad2(n_radii); for (std::size_t i=0; i < n_radii; ++i) { rad2[i] = radii[i]*radii[i]; } // GPU setup cudaSetDevice(i_gpu); float* d_coords; float* d_rad2; unsigned int* d_pops; cudaMalloc((void**) &d_coords , sizeof(float) * n_rows * n_cols); check_error("pop-calc device mallocs (coords)"); cudaMalloc((void**) &d_pops , sizeof(unsigned int) * n_rows * n_radii); check_error("pop-calc device mallocs (pops)"); cudaMalloc((void**) &d_rad2 , sizeof(float) * n_radii); check_error("pop-calc device mallocs (rad2)"); cudaMemset(d_pops , 0 , sizeof(unsigned int) * n_rows * n_radii); check_error("pop-calc memset"); cudaMemcpy(d_coords , coords , sizeof(float) * n_rows * n_cols , cudaMemcpyHostToDevice); cudaMemcpy(d_rad2 , rad2.data() , sizeof(float) * n_radii , cudaMemcpyHostToDevice); check_error("pop-calc mem copies"); int max_shared_mem; cudaDeviceGetAttribute(&max_shared_mem , cudaDevAttrMaxSharedMemoryPerBlock , 
i_gpu); check_error("getting max shared mem size"); unsigned int block_size = BSIZE_POPS; unsigned int shared_mem = 2 * block_size * n_cols * sizeof(float); if (shared_mem > max_shared_mem) { std::cerr << "error: max. shared mem per block too small on this GPU.\n" << " either reduce BSIZE_POPS or get a better GPU." << std::endl; exit(EXIT_FAILURE); } unsigned int block_rng = min_multiplicator(i_to-i_from, block_size); Clustering::logger(std::cout) << "# blocks needed: " << block_rng << std::endl; for (unsigned int i=0; i*block_size < n_rows; ++i) { Clustering::Density::CUDA::Kernel::population_count <<< block_rng , block_size , shared_mem >>> (i*block_size , d_coords , n_rows , n_cols , d_rad2 , n_radii , d_pops , i_from , i_to); } cudaDeviceSynchronize(); check_error("after kernel loop"); // get partial results from GPU std::vector<unsigned int> partial_pops(n_rows*n_radii); cudaMemcpy(partial_pops.data() , d_pops , sizeof(unsigned int) * n_rows * n_radii , cudaMemcpyDeviceToHost); // sort into resulting pops Pops pops; for (unsigned int r=0; r < n_radii; ++r) { pops[radii[r]].resize(n_rows, 0); for (unsigned int i=i_from; i < i_to; ++i) { pops[radii[r]][i] = partial_pops[r*n_rows+i]; } } cudaFree(d_coords); cudaFree(d_rad2); cudaFree(d_pops); return pops; } Pops calculate_populations(const float* coords , const std::size_t n_rows , const std::size_t n_cols , std::vector<float> radii) { using Clustering::Tools::dim1_sorted_coords; using Clustering::Tools::boxlimits; ASSUME_ALIGNED(coords); std::sort(radii.begin(), radii.end(), std::greater<float>()); int n_gpus = get_num_gpus(); int gpu_range = n_rows / n_gpus; int i; std::vector<Pops> partial_pops(n_gpus); #pragma omp parallel for default(none)\ private(i)\ firstprivate(n_gpus,n_rows,n_cols,gpu_range)\ shared(partial_pops,radii,coords)\ num_threads(n_gpus)\ schedule(dynamic,1) for (i=0; i < n_gpus; ++i) { // compute partial populations in parallel // on all available GPUs partial_pops[i] = 
calculate_populations_per_gpu(coords , n_rows , n_cols , radii , i*gpu_range , i == (n_gpus-1) ? n_rows : (i+1)*gpu_range , i); } Pops pops; // combine pops for (float r: radii) { pops[r].resize(n_rows, 0); for (i=0; i < n_rows; ++i) { for (unsigned int i_gpu=0; i_gpu < n_gpus; ++i_gpu) { pops[r][i] += partial_pops[i_gpu][r][i]; } } } return pops; } std::tuple<Neighborhood, Neighborhood> nearest_neighbors_per_gpu(const float* coords , const std::size_t n_rows , const std::size_t n_cols , const std::vector<float>& free_energy , std::size_t i_from , std::size_t i_to , int i_gpu) { using Clustering::Tools::min_multiplicator; ASSUME_ALIGNED(coords); // GPU setup cudaSetDevice(i_gpu); float* d_coords; float* d_fe; unsigned int* d_nh_nhhd_ndx; float* d_nh_nhhd_dist; // allocate memory cudaMalloc((void**) &d_coords , sizeof(float) * n_rows * n_cols); cudaMalloc((void**) &d_fe , sizeof(float) * n_rows); cudaMalloc((void**) &d_nh_nhhd_ndx , sizeof(unsigned int) * n_rows * 2); cudaMalloc((void**) &d_nh_nhhd_dist , sizeof(float) * n_rows * 2); // initialize all min dists and indices to zero cudaMemset(d_nh_nhhd_ndx , 0 , sizeof(unsigned int) * n_rows * 2); cudaMemset(d_nh_nhhd_dist , 0 , sizeof(float) * n_rows * 2); // copy coordinates and free energies to GPU cudaMemcpy(d_coords , coords , sizeof(float) * n_rows * n_cols , cudaMemcpyHostToDevice); cudaMemcpy(d_fe , free_energy.data() , sizeof(float) * n_rows , cudaMemcpyHostToDevice); int max_shared_mem; cudaDeviceGetAttribute(&max_shared_mem , cudaDevAttrMaxSharedMemoryPerBlock , i_gpu); check_error("retrieving max shared mem"); unsigned int block_size = BSIZE_NH; // compute necessary size of shared memory of block for // coordinates (2*n_cols) and free energies (1 col) unsigned int shared_mem = (2*n_cols + 1) * block_size * sizeof(float); if (shared_mem > max_shared_mem) { std::cerr << "error: max. shared mem per block too small on this GPU.\n" << " either reduce block_size for NN search or get a " << "better GPU." 
<< std::endl; exit(EXIT_FAILURE); } unsigned int block_rng = min_multiplicator(i_to-i_from, block_size); for (unsigned int i=0; i*block_size < n_rows; ++i) { Clustering::Density::CUDA::Kernel::nearest_neighbor_search <<< block_rng , block_size , shared_mem >>> (i*block_size , d_coords , n_rows , n_cols , d_fe , d_nh_nhhd_ndx , d_nh_nhhd_dist , i_from , i_to); } cudaDeviceSynchronize(); check_error("after kernel loop"); // initialize neighborhoods (on host) Neighborhood nh; Neighborhood nhhd; // collect results from GPU std::vector<unsigned int> buf_ndx(n_rows * 2); std::vector<float> buf_dist(n_rows * 2); cudaMemcpy(buf_ndx.data() , d_nh_nhhd_ndx , sizeof(unsigned int) * n_rows * 2 , cudaMemcpyDeviceToHost); cudaMemcpy(buf_dist.data() , d_nh_nhhd_dist , sizeof(float) * n_rows * 2 , cudaMemcpyDeviceToHost); for (unsigned int i=0; i < n_rows; ++i) { nh[i] = {buf_ndx[i] , buf_dist[i]}; nhhd[i] = {buf_ndx[n_rows+i] , buf_dist[n_rows+i]}; } // device cleanup cudaFree(d_coords); cudaFree(d_fe); cudaFree(d_nh_nhhd_ndx); cudaFree(d_nh_nhhd_dist); // results return std::make_tuple(nh, nhhd); } std::tuple<Neighborhood, Neighborhood> nearest_neighbors(const float* coords , const std::size_t n_rows , const std::size_t n_cols , const std::vector<float>& free_energy) { int n_gpus = get_num_gpus(); std::vector<std::tuple<Neighborhood, Neighborhood>> partials(n_gpus); unsigned int gpu_range = n_rows / n_gpus; unsigned int i_gpu; #pragma omp parallel for default(none)\ private(i_gpu)\ firstprivate(n_gpus,n_rows,n_cols,gpu_range)\ shared(partials,coords,free_energy)\ num_threads(n_gpus) for (i_gpu=0; i_gpu < n_gpus; ++i_gpu) { partials[i_gpu] = nearest_neighbors_per_gpu(coords , n_rows , n_cols , free_energy , i_gpu*gpu_range , (i_gpu == (n_gpus-1)) ? 
n_rows : (i_gpu+1)*gpu_range , i_gpu); } // combine partial neighborhood results from gpus Neighborhood nh; Neighborhood nhhd; std::tie(nh, nhhd) = partials[0]; for (i_gpu=1; i_gpu < n_gpus; ++i_gpu) { Neighborhood partial_nh; Neighborhood partial_nhhd; std::tie(partial_nh, partial_nhhd) = partials[i_gpu]; for (unsigned int i=0; i < n_rows; ++i) { if ((partial_nh[i].second != 0) || (partial_nhhd[i].second != 0)) { nh[i] = partial_nh[i]; nhhd[i] = partial_nhhd[i]; } } } return std::make_tuple(nh, nhhd); } std::vector<unsigned int> clustering_rebased(std::vector<unsigned int> clustering) { std::map<unsigned int, unsigned int> dict; // construct dictionary dict[0] = 0; for (unsigned int i=0; i < clustering.size(); ++i) { unsigned int s = clustering[i]; if (dict.count(s) == 0) { dict[s] = i+1; } } // rebase for (unsigned int& s: clustering) { s = dict[s]; } return clustering; } std::vector<unsigned int> merge_results(std::vector<std::vector<unsigned int>> clusterings , unsigned int max_row) { unsigned int n_results = clusterings.size(); if (n_results == 0) { std::cerr << "error: there are no partial clustering results to merge!" << std::endl; exit(EXIT_FAILURE); } else { if (clusterings[0].size() == 0) { std::cerr << "error: no sampling, nothing to merge" << std::endl; exit(EXIT_FAILURE); } } max_row = std::min(max_row , (unsigned int) clusterings[0].size()); for (unsigned int i=0; i < max_row; ++i) { // collect start points, i.e. 
cluster assignemnts // of all partial results std::set<unsigned int> start_points; for (unsigned int j=0; j < n_results; ++j) { start_points.insert(clusterings[j][i]); } // not interested in zero-ed states (aka no assignment) if (start_points.count(0) > 0) { start_points.erase(0); } // follow start_points (id = i_state-1), collect all // states and rebase them to min(state) std::set<unsigned int> need_update = start_points; for (unsigned int s: start_points) { unsigned int s_old = 1; while (s != 0 && s_old != s) { s_old = s; s = clusterings[0][s-1]; need_update.insert(s); } } // std::set is guaranteed to be ordered! unsigned int min_s = (*need_update.begin()); for (unsigned int s: need_update) { clusterings[0][s-1] = min_s; } } return clusterings[0]; } std::vector<std::size_t> screening(const std::vector<float>& free_energy , const Neighborhood& nh , const float free_energy_threshold , const float* coords , const std::size_t n_rows , const std::size_t n_cols , const std::vector<std::size_t> initial_clusters) { using Clustering::Tools::min_multiplicator; // data preparation std::size_t first_frame_above_threshold; double sigma2; std::vector<FreeEnergy> fe_sorted; std::vector<std::size_t> prev_clustering; unsigned int prev_max_state; std::tie(prev_clustering , first_frame_above_threshold , sigma2 , fe_sorted , std::ignore , prev_max_state) = prepare_initial_clustering(free_energy , nh , free_energy_threshold , n_rows , initial_clusters); // measure runtime & give some informative output std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now(); Clustering::logger(std::cout) << "FE: " << free_energy_threshold << " frames: " << first_frame_above_threshold; float max_dist2 = 4*sigma2; // prepare CUDA environment int n_gpus = get_num_gpus(); std::vector<float*> d_coords_sorted(n_gpus); std::vector<unsigned int*> d_clustering(n_gpus); // sort coords (and previous clustering results) // according to free energies std::vector<float> 
tmp_coords_sorted(n_rows * n_cols); std::vector<unsigned int> clustering_sorted(n_rows); for (unsigned int i=0; i < n_rows; ++i) { for (unsigned int j=0; j < n_cols; ++j) { tmp_coords_sorted[i*n_cols+j] = coords[fe_sorted[i].first*n_cols+j]; } // intialize energy-sorted clustering results if (i < first_frame_above_threshold) { clustering_sorted[i] = prev_clustering[fe_sorted[i].first]; } } // initialize new (unclustered) frames unsigned int prev_last_frame = std::distance(clustering_sorted.begin() , std::find(clustering_sorted.begin() , clustering_sorted.end() , 0)); for (unsigned int i=prev_last_frame; i < first_frame_above_threshold; ++i) { clustering_sorted[i] = ++prev_max_state; } // computational range of single gpu unsigned int gpu_rng = min_multiplicator(first_frame_above_threshold - prev_last_frame , n_gpus); if (gpu_rng == 0) { // nothing to do, since all frames below threshold were already // below previous threshold return initial_clusters; } int max_shared_mem; // assuming GPUs are of same type with same amount of memory cudaDeviceGetAttribute(&max_shared_mem , cudaDevAttrMaxSharedMemoryPerBlock , 0); check_error("getting max shared mem size"); unsigned int shared_mem = 2 * BSIZE_SCR * n_cols * sizeof(float); //TODO!!!!!!!!! 
check shared_mem + const(shared_mem) < max_shared_mem unsigned int block_rng, i_from, i_to, i, i_gpu; // initialize GPUs for (i_gpu=0; i_gpu < n_gpus; ++i_gpu) { cudaSetDevice(i_gpu); // allocate memory on GPUs cudaMalloc((void**) &d_coords_sorted[i_gpu] , sizeof(float) * n_rows * n_cols); check_error("after malloc"); cudaMalloc((void**) &d_clustering[i_gpu] , sizeof(unsigned int) * n_rows); check_error("after malloc"); // copy sorted coords and previous clustering results to GPUs cudaMemcpy(d_coords_sorted[i_gpu] , tmp_coords_sorted.data() , sizeof(float) * n_rows * n_cols , cudaMemcpyHostToDevice); check_error("after memcopy of sorted coords"); } // change state names in clustering to conform to lowest indices clustering_sorted = clustering_rebased(clustering_sorted); // fill zero-set indices with their own index for (unsigned int i=0; i < first_frame_above_threshold; ++i) { if (clustering_sorted[i] == 0) { clustering_sorted[i] = i+1; } } std::vector<unsigned int> clustering_sorted_old; while (clustering_sorted_old != clustering_sorted) { // cache old results clustering_sorted_old = clustering_sorted; // (re-)cluster #pragma omp parallel for\ default(none)\ private(i,i_gpu,block_rng,i_from,i_to)\ firstprivate(n_gpus,n_rows,n_cols,gpu_rng,max_dist2,\ prev_last_frame,\ shared_mem,first_frame_above_threshold)\ shared(d_coords_sorted,d_clustering,\ tmp_coords_sorted,clustering_sorted)\ num_threads(n_gpus) for (i_gpu=0; i_gpu < n_gpus; ++i_gpu) { cudaSetDevice(i_gpu); check_error("after setting gpu device"); cudaMemcpy(d_clustering[i_gpu] , clustering_sorted.data() , sizeof(unsigned int) * n_rows , cudaMemcpyHostToDevice); check_error("after memcopy of prev clustering"); i_from = prev_last_frame + i_gpu * gpu_rng; i_to = (i_gpu == (n_gpus-1)) ? 
first_frame_above_threshold : prev_last_frame + (i_gpu+1) * gpu_rng; block_rng = min_multiplicator(i_to-i_from , BSIZE_SCR); for (i=0; i*BSIZE_SCR < first_frame_above_threshold; ++i) { Clustering::Density::CUDA::Kernel::screening <<< block_rng , BSIZE_SCR , shared_mem >>> (i*BSIZE_SCR , d_coords_sorted[i_gpu] , std::min(first_frame_above_threshold, n_rows) , n_cols , max_dist2 , d_clustering[i_gpu] , i_from , i_to); } cudaDeviceSynchronize(); check_error("after kernel loop"); } // collect & merge clustering results from GPUs std::vector<std::vector<unsigned int>> clstr_results(n_gpus , std::vector<unsigned int>(n_rows)); for (int i_gpu=0; i_gpu < n_gpus; ++i_gpu) { cudaMemcpy(clstr_results[i_gpu].data() , d_clustering[i_gpu] , sizeof(unsigned int) * n_rows , cudaMemcpyDeviceToHost); } clustering_sorted = merge_results(clstr_results , first_frame_above_threshold); // update references by comparing old to new results std::unordered_map<unsigned int, unsigned int> dict; dict[0] = 0; for (i=0; i < first_frame_above_threshold; ++i) { unsigned int state_old = clustering_sorted_old[i]; unsigned int state_new = clustering_sorted[i]; if (dict.count(state_old) == 0) { dict[state_old] = std::min(state_old, state_new); } else { dict[state_old] = std::min(dict[state_old], state_new); } } for (unsigned int& s: clustering_sorted) { s = dict[s]; } } // end while // cleanup CUDA environment for (int i_gpu=0; i_gpu < n_gpus; ++i_gpu) { cudaFree(d_coords_sorted[i_gpu]); cudaFree(d_clustering[i_gpu]); } // convert state trajectory from // FE-sorted order to original order std::vector<std::size_t> clustering(n_rows); for (unsigned int i=0; i < n_rows; ++i) { clustering[fe_sorted[i].first] = clustering_sorted[i]; } // final output std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now(); Clustering::logger(std::cout) << " runtime: " << std::chrono::duration_cast <std::chrono::seconds>(t1-t0).count() << " secs" << std::endl; return 
normalized_cluster_names(first_frame_above_threshold , clustering , fe_sorted); } }}} // end Clustering::Density::CUDA
91a73ccfc2de87d2ec0c1026b15808586c83455f.hip
// !!! This is a file automatically generated by hipify!!! #include <demo_util.h> #include <cuda_util.h> #include <math.h> #include <string.h> /* For atoi */ #define PI 3.14159265358979323846264338327 #ifdef USE_GPU /* From CG kernels */ int get_N(); int get_blocksPerGrid(); double dot_norm(int N, double *a, double *dev_a, double *dev_partial_c); double dot_gpu(int N, double *a, double *b, double *dev_a, double *dev_b, double *dev_partial_c); double cg_loop(int N, double alpha, double *pk, double *uk, double *rk, double *wk, double *dev_pk, double *dev_uk, double *dev_rk, double *dev_wk, double *dev_partial_c, double *dev_partial_d, double *zk_norm); #endif double utrue(double x) { double u; double pi2; pi2 = 2*PI; u = cos(pi2*x); return u; } double rhs(double x) { double fx; double pi2; pi2 = 2*PI; fx = -(pi2)*(pi2)*cos(pi2*x); return fx; } int main(int argc, char** argv) { /* Data arrays */ double a,b; int n_global; double *x, *F, *B; /* Iterative variables */ double tol; int kmax,j,k; double range[2]; /* Stuff that was handled by MPI */ int my_rank = 0; set_rank(my_rank); /* Used in printing */ read_loglevel(argc,argv); #ifdef USE_GPU printf("Using GPU\n"); #endif if (my_rank == 0) { /* Input */ int err; int mp; read_int(argc,argv, "-m", &mp, &err); if (err > 0) { print_global("Command line argument '-m' not found\n"); exit(0); } #ifdef USE_GPU n_global = get_N(); if (n_global != (1 << mp)) { print_global("1<< mp != N (defined in cg_kernel.cu)\n"); exit(0); } #else n_global = 1 << mp; /* Number of sub-intervals used for integration */ #endif read_int(argc,argv, "--kmax", &kmax, &err); if (err > 0) { print_global("Command line argument '--kmax' not found\n"); exit(0); } read_double(argc,argv, "--tol", &tol, &err); if (err > 0) { print_global("Command line argument '--tol' not found\n"); exit(0); } /* Hardwire values */ a = 0; b = 1; } /* Setup mesh */ range[0] = a; range[1] = b; int m = n_global; /* Number of panels in each section */ double h = (range[1] - 
range[0])/((double)m); double h2; h2 = h*h; /* --------------------------------------------------------------- Set up right hand side --------------------------------------------------------------- */ zeros_array(m+2,&B); /* Include ghost values */ zeros_array(m+2,&F); linspace_array(range[0]-h/2.0,range[1]+h/2.0,m+2,&x); /* Left edge BC */ double u0 = utrue(range[0]); B[0] = 2*u0; /* Right edge BC */ double u1 = utrue(range[1]); B[m+1] = 2*u1; /* Compute right hand side */ for(j = 1; j < m+1; j++) { F[j] = rhs(x[j]) - (B[j-1] - 2*B[j] + B[j+1])/h2; } /* ---------------------------------------------------------------- Set up arrays and other vars needed for iterative method ---------------------------------------------------------------- */ double *pk, *uk, *rk, *wk; zeros_array(m+2,&pk); zeros_array(m+2,&uk); zeros_array(m+2,&rk); zeros_array(m+2,&wk); double alpha,beta; int it_cnt; double res; /* ---------------------------------------------------------------- Start iterations ---------------------------------------------------------------- */ for(j = 1; j < m+1; j++) { /* Compute residual rk - F - A*uk */ rk[j] = F[j] - (uk[j-1] - 2*uk[j] + uk[j+1])/h2; pk[j] = rk[j]; } double bv[2] = {0,0}; #ifdef USE_GPU int bpg = get_blocksPerGrid(); double *dev_pk, *dev_uk, *dev_rk, *dev_wk; CHECK(hipMalloc((void**) &dev_pk, m*sizeof(double))); CHECK(hipMalloc((void**) &dev_uk, m*sizeof(double))); CHECK(hipMalloc((void**) &dev_rk, m*sizeof(double))); CHECK(hipMalloc((void**) &dev_wk, m*sizeof(double))); double *dev_partial_c, *dev_partial_d; CHECK(hipMalloc((void**) &dev_partial_c, bpg*sizeof(double) ) ); CHECK(hipMalloc((void**) &dev_partial_d, bpg*sizeof(double) ) ); bv[0] = dot_norm(m, &rk[1], dev_rk,dev_partial_c); #else for(j = 1; j < m+1; j++) { bv[0] += rk[j]*rk[j]; } #endif for(k = 0; k < kmax; k++) { /* Left edge */ pk[0] = -pk[1]; /* Right edge */ pk[m+1] = -pk[m]; for(j = 1; j < m+1; j++) { wk[j] = (pk[j-1] - 2*pk[j] + pk[j+1])/h2; } double av[2] = {0,0}; av[0] = 
bv[0]; for(j = 1; j < m+1; j++) { av[1] += pk[j]*wk[j]; } alpha = av[0]/av[1]; double norm_zk = 0; bv[1] = av[0]; #ifdef USE_GPU bv[0] = cg_loop(m, alpha, &pk[1], &uk[1], &rk[1], &wk[1], dev_pk, dev_uk,dev_rk, dev_wk, dev_partial_c, dev_partial_d, &norm_zk); #else bv[0] = 0; double zk; for(j = 1; j < m+1; j++) { zk = alpha*pk[j]; uk[j] = uk[j] + zk; rk[j] = rk[j] - alpha*wk[j]; bv[0] += rk[j]*rk[j]; norm_zk = fabs(zk) > norm_zk ? fabs(zk) : norm_zk; } #endif beta = bv[0]/bv[1]; /* (rkp1 dot rkp1)/(rk dot rk) */ print_info("%8d %16.8e\n",k,norm_zk); /* save results for output */ it_cnt = k+1; res = norm_zk; if (norm_zk < tol) { break; } for(j = 1; j < m+1; j++) { pk[j] = rk[j] + beta*pk[j]; } } /* ---------------------------------------------------------------- Calculate error and report results ---------------------------------------------------------------- */ double err[3] = {0,0,0}; for(j = 1; j < m+1; j++) { double udiff = uk[j] - utrue(x[j]); err[0] += fabs(udiff)*h; err[1] += fabs(udiff*udiff)*h; err[2] = fabs(udiff) > err[2] ? fabs(udiff) : err[2]; } err[1] = sqrt(err[1]); /* 2-norm */ print_essential("%10d %10d %12.4e %12.4e %12.4e %12.4e\n",n_global,it_cnt, res, err[0],err[1],err[2]); delete_array((void**) &B); delete_array((void**) &F); delete_array((void**) &x); delete_array((void**) &pk); delete_array((void**) &uk); delete_array((void**) &rk); delete_array((void**) &wk); #ifdef USE_GPU /* free memory on the gpu side */ CHECK(hipFree(dev_pk)); CHECK(hipFree(dev_uk)); CHECK(hipFree(dev_rk)); CHECK(hipFree(dev_wk)); CHECK(hipFree(dev_partial_c)); CHECK(hipFree(dev_partial_d)); #endif return 0; }
91a73ccfc2de87d2ec0c1026b15808586c83455f.cu
#include <demo_util.h> #include <cuda_util.h> #include <math.h> #include <string.h> /* For atoi */ #define PI 3.14159265358979323846264338327 #ifdef USE_GPU /* From CG kernels */ int get_N(); int get_blocksPerGrid(); double dot_norm(int N, double *a, double *dev_a, double *dev_partial_c); double dot_gpu(int N, double *a, double *b, double *dev_a, double *dev_b, double *dev_partial_c); double cg_loop(int N, double alpha, double *pk, double *uk, double *rk, double *wk, double *dev_pk, double *dev_uk, double *dev_rk, double *dev_wk, double *dev_partial_c, double *dev_partial_d, double *zk_norm); #endif double utrue(double x) { double u; double pi2; pi2 = 2*PI; u = cos(pi2*x); return u; } double rhs(double x) { double fx; double pi2; pi2 = 2*PI; fx = -(pi2)*(pi2)*cos(pi2*x); return fx; } int main(int argc, char** argv) { /* Data arrays */ double a,b; int n_global; double *x, *F, *B; /* Iterative variables */ double tol; int kmax,j,k; double range[2]; /* Stuff that was handled by MPI */ int my_rank = 0; set_rank(my_rank); /* Used in printing */ read_loglevel(argc,argv); #ifdef USE_GPU printf("Using GPU\n"); #endif if (my_rank == 0) { /* Input */ int err; int mp; read_int(argc,argv, "-m", &mp, &err); if (err > 0) { print_global("Command line argument '-m' not found\n"); exit(0); } #ifdef USE_GPU n_global = get_N(); if (n_global != (1 << mp)) { print_global("1<< mp != N (defined in cg_kernel.cu)\n"); exit(0); } #else n_global = 1 << mp; /* Number of sub-intervals used for integration */ #endif read_int(argc,argv, "--kmax", &kmax, &err); if (err > 0) { print_global("Command line argument '--kmax' not found\n"); exit(0); } read_double(argc,argv, "--tol", &tol, &err); if (err > 0) { print_global("Command line argument '--tol' not found\n"); exit(0); } /* Hardwire values */ a = 0; b = 1; } /* Setup mesh */ range[0] = a; range[1] = b; int m = n_global; /* Number of panels in each section */ double h = (range[1] - range[0])/((double)m); double h2; h2 = h*h; /* 
--------------------------------------------------------------- Set up right hand side --------------------------------------------------------------- */ zeros_array(m+2,&B); /* Include ghost values */ zeros_array(m+2,&F); linspace_array(range[0]-h/2.0,range[1]+h/2.0,m+2,&x); /* Left edge BC */ double u0 = utrue(range[0]); B[0] = 2*u0; /* Right edge BC */ double u1 = utrue(range[1]); B[m+1] = 2*u1; /* Compute right hand side */ for(j = 1; j < m+1; j++) { F[j] = rhs(x[j]) - (B[j-1] - 2*B[j] + B[j+1])/h2; } /* ---------------------------------------------------------------- Set up arrays and other vars needed for iterative method ---------------------------------------------------------------- */ double *pk, *uk, *rk, *wk; zeros_array(m+2,&pk); zeros_array(m+2,&uk); zeros_array(m+2,&rk); zeros_array(m+2,&wk); double alpha,beta; int it_cnt; double res; /* ---------------------------------------------------------------- Start iterations ---------------------------------------------------------------- */ for(j = 1; j < m+1; j++) { /* Compute residual rk - F - A*uk */ rk[j] = F[j] - (uk[j-1] - 2*uk[j] + uk[j+1])/h2; pk[j] = rk[j]; } double bv[2] = {0,0}; #ifdef USE_GPU int bpg = get_blocksPerGrid(); double *dev_pk, *dev_uk, *dev_rk, *dev_wk; CHECK(cudaMalloc((void**) &dev_pk, m*sizeof(double))); CHECK(cudaMalloc((void**) &dev_uk, m*sizeof(double))); CHECK(cudaMalloc((void**) &dev_rk, m*sizeof(double))); CHECK(cudaMalloc((void**) &dev_wk, m*sizeof(double))); double *dev_partial_c, *dev_partial_d; CHECK(cudaMalloc((void**) &dev_partial_c, bpg*sizeof(double) ) ); CHECK(cudaMalloc((void**) &dev_partial_d, bpg*sizeof(double) ) ); bv[0] = dot_norm(m, &rk[1], dev_rk,dev_partial_c); #else for(j = 1; j < m+1; j++) { bv[0] += rk[j]*rk[j]; } #endif for(k = 0; k < kmax; k++) { /* Left edge */ pk[0] = -pk[1]; /* Right edge */ pk[m+1] = -pk[m]; for(j = 1; j < m+1; j++) { wk[j] = (pk[j-1] - 2*pk[j] + pk[j+1])/h2; } double av[2] = {0,0}; av[0] = bv[0]; for(j = 1; j < m+1; j++) { av[1] 
+= pk[j]*wk[j]; } alpha = av[0]/av[1]; double norm_zk = 0; bv[1] = av[0]; #ifdef USE_GPU bv[0] = cg_loop(m, alpha, &pk[1], &uk[1], &rk[1], &wk[1], dev_pk, dev_uk,dev_rk, dev_wk, dev_partial_c, dev_partial_d, &norm_zk); #else bv[0] = 0; double zk; for(j = 1; j < m+1; j++) { zk = alpha*pk[j]; uk[j] = uk[j] + zk; rk[j] = rk[j] - alpha*wk[j]; bv[0] += rk[j]*rk[j]; norm_zk = fabs(zk) > norm_zk ? fabs(zk) : norm_zk; } #endif beta = bv[0]/bv[1]; /* (rkp1 dot rkp1)/(rk dot rk) */ print_info("%8d %16.8e\n",k,norm_zk); /* save results for output */ it_cnt = k+1; res = norm_zk; if (norm_zk < tol) { break; } for(j = 1; j < m+1; j++) { pk[j] = rk[j] + beta*pk[j]; } } /* ---------------------------------------------------------------- Calculate error and report results ---------------------------------------------------------------- */ double err[3] = {0,0,0}; for(j = 1; j < m+1; j++) { double udiff = uk[j] - utrue(x[j]); err[0] += fabs(udiff)*h; err[1] += fabs(udiff*udiff)*h; err[2] = fabs(udiff) > err[2] ? fabs(udiff) : err[2]; } err[1] = sqrt(err[1]); /* 2-norm */ print_essential("%10d %10d %12.4e %12.4e %12.4e %12.4e\n",n_global,it_cnt, res, err[0],err[1],err[2]); delete_array((void**) &B); delete_array((void**) &F); delete_array((void**) &x); delete_array((void**) &pk); delete_array((void**) &uk); delete_array((void**) &rk); delete_array((void**) &wk); #ifdef USE_GPU /* free memory on the gpu side */ CHECK(cudaFree(dev_pk)); CHECK(cudaFree(dev_uk)); CHECK(cudaFree(dev_rk)); CHECK(cudaFree(dev_wk)); CHECK(cudaFree(dev_partial_c)); CHECK(cudaFree(dev_partial_d)); #endif return 0; }
13f8060c99cd826d47708e2ab3cd1f5c98b24ac7.hip
// !!! This is a file automatically generated by hipify!!! //pass //--blockDim=64 --gridDim=64 --no-inline #include <hip/hip_runtime.h> #include <stdio.h> #define N 2 __device__ void bar(int* q) { } __global__ void foo(int* p) { __shared__ int A[10]; bar(p); bar(A); }
13f8060c99cd826d47708e2ab3cd1f5c98b24ac7.cu
//pass //--blockDim=64 --gridDim=64 --no-inline #include <cuda.h> #include <stdio.h> #define N 2 __device__ void bar(int* q) { } __global__ void foo(int* p) { __shared__ int A[10]; bar(p); bar(A); }
625e9feb9bb41e0f5e712f52593c799275033461.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "fbgemm_gpu/layout_transform_ops.cuh" #include "fbgemm_gpu/sparse_ops.h" #include <ATen/ATen.h> #include <ATen/core/op_registration/op_registration.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/Exceptions.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <torch/library.h> #include "ATen/Parallel.h" #include "hipcub/hipcub.hpp" namespace at { at::Tensor recat_embedding_grad_output_cuda( Tensor grad_output, // [B_local][T_global][D] std::vector<int64_t> num_features_per_rank) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(grad_output.get_device()); TORCH_CHECK(grad_output.is_contiguous()); const auto B_local = grad_output.size(0); const auto T_global = grad_output.size(1); const auto D = grad_output.size(2); Tensor sharded_grad_output = at::empty({grad_output.numel()}, grad_output.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.type(), "recat_embedding_gradients", ([&] { const auto go = grad_output.accessor<scalar_t, 3>(); auto sgo = sharded_grad_output.accessor<scalar_t, 1>(); int64_t feature_offset = 0; int64_t sgo_offset = 0; for (auto num_features : num_features_per_rank) { AT_CUDA_CHECK(hipMemcpy2DAsync( &sgo[sgo_offset], num_features * D * sizeof(scalar_t), &go[0][feature_offset][0], T_global * D * sizeof(scalar_t), num_features * D * sizeof(scalar_t), B_local, hipMemcpyDeviceToDevice, at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); feature_offset += num_features; sgo_offset += B_local * num_features * D; } TORCH_CHECK(sgo_offset == grad_output.numel()); TORCH_CHECK(feature_offset == T_global); })); return sharded_grad_output; } Tensor recat_embedding_grad_output_mixed_D_cuda( 
const Tensor& grad_output, // [B_local][Sum_T_global(D)] const std::vector<int64_t>& dim_sum_per_rank) { TORCH_CHECK(grad_output.is_contiguous()); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(grad_output.get_device()); const auto B_local = grad_output.size(0); const auto global_dim_sum = at::sum_integers(dim_sum_per_rank); Tensor sharded_grad_output = at::empty({grad_output.numel()}, grad_output.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.type(), "recat_embedding_gradients", ([&] { const auto go = grad_output.accessor<scalar_t, 2>(); auto sgo = sharded_grad_output.accessor<scalar_t, 1>(); int64_t sgo_offset = 0; int64_t accum_dim_sum = 0; for (auto dim_sum : dim_sum_per_rank) { AT_CUDA_CHECK(hipMemcpy2DAsync( &sgo[sgo_offset], dim_sum * sizeof(scalar_t), &go[0][accum_dim_sum], global_dim_sum * sizeof(scalar_t), dim_sum * sizeof(scalar_t), B_local, hipMemcpyDeviceToDevice, at::hip::getCurrentHIPStreamMasqueradingAsCUDA())); sgo_offset += B_local * dim_sum; accum_dim_sum += dim_sum; } TORCH_CHECK(sgo_offset == grad_output.numel()); TORCH_CHECK(accum_dim_sum == global_dim_sum); })); return sharded_grad_output; } Tensor recat_embedding_grad_output_mixed_D_batch_cuda( const Tensor& grad_output, // [B_local][Sum_T_global(D)] const Tensor& dim_sum_per_rank, const Tensor& cumsum_dim_sum_per_rank) { TORCH_CHECK(grad_output.is_contiguous()); at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(grad_output.get_device()); const auto B_local = grad_output.size(0); Tensor sharded_grad_output = at::empty({grad_output.numel()}, grad_output.options()); const auto dim_num = dim_sum_per_rank.size(0); const auto dim_sum = grad_output.size(1); const dim3 threads( fbgemm_gpu::kWarpSize, fbgemm_gpu::kMaxThreads / fbgemm_gpu::kWarpSize); const dim3 blocks(fbgemm_gpu::div_round_up( (B_local * dim_num), fbgemm_gpu::kMaxThreads / fbgemm_gpu::kWarpSize)); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.type(), 
"recat_embedding_gradients", ([&] { hipLaunchKernelGGL(( recat_copy_async_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), dim_sum_per_rank.data_ptr<int64_t>(), cumsum_dim_sum_per_rank.data_ptr<int64_t>(), grad_output.data_ptr<scalar_t>(), sharded_grad_output.data_ptr<scalar_t>(), dim_num, B_local, dim_sum); C10_HIP_KERNEL_LAUNCH_CHECK(); })); return sharded_grad_output; } } // namespace at
625e9feb9bb41e0f5e712f52593c799275033461.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "fbgemm_gpu/layout_transform_ops.cuh" #include "fbgemm_gpu/sparse_ops.h" #include <ATen/ATen.h> #include <ATen/core/op_registration/op_registration.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include <c10/cuda/CUDAGuard.h> #include <torch/library.h> #include "ATen/Parallel.h" #include "cub/device/device_scan.cuh" namespace at { at::Tensor recat_embedding_grad_output_cuda( Tensor grad_output, // [B_local][T_global][D] std::vector<int64_t> num_features_per_rank) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(grad_output.get_device()); TORCH_CHECK(grad_output.is_contiguous()); const auto B_local = grad_output.size(0); const auto T_global = grad_output.size(1); const auto D = grad_output.size(2); Tensor sharded_grad_output = at::empty({grad_output.numel()}, grad_output.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.type(), "recat_embedding_gradients", ([&] { const auto go = grad_output.accessor<scalar_t, 3>(); auto sgo = sharded_grad_output.accessor<scalar_t, 1>(); int64_t feature_offset = 0; int64_t sgo_offset = 0; for (auto num_features : num_features_per_rank) { AT_CUDA_CHECK(cudaMemcpy2DAsync( &sgo[sgo_offset], num_features * D * sizeof(scalar_t), &go[0][feature_offset][0], T_global * D * sizeof(scalar_t), num_features * D * sizeof(scalar_t), B_local, cudaMemcpyDeviceToDevice, at::cuda::getCurrentCUDAStream())); feature_offset += num_features; sgo_offset += B_local * num_features * D; } TORCH_CHECK(sgo_offset == grad_output.numel()); TORCH_CHECK(feature_offset == T_global); })); return sharded_grad_output; } Tensor recat_embedding_grad_output_mixed_D_cuda( const Tensor& grad_output, // [B_local][Sum_T_global(D)] const std::vector<int64_t>& dim_sum_per_rank) { 
TORCH_CHECK(grad_output.is_contiguous()); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(grad_output.get_device()); const auto B_local = grad_output.size(0); const auto global_dim_sum = at::sum_integers(dim_sum_per_rank); Tensor sharded_grad_output = at::empty({grad_output.numel()}, grad_output.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.type(), "recat_embedding_gradients", ([&] { const auto go = grad_output.accessor<scalar_t, 2>(); auto sgo = sharded_grad_output.accessor<scalar_t, 1>(); int64_t sgo_offset = 0; int64_t accum_dim_sum = 0; for (auto dim_sum : dim_sum_per_rank) { AT_CUDA_CHECK(cudaMemcpy2DAsync( &sgo[sgo_offset], dim_sum * sizeof(scalar_t), &go[0][accum_dim_sum], global_dim_sum * sizeof(scalar_t), dim_sum * sizeof(scalar_t), B_local, cudaMemcpyDeviceToDevice, at::cuda::getCurrentCUDAStream())); sgo_offset += B_local * dim_sum; accum_dim_sum += dim_sum; } TORCH_CHECK(sgo_offset == grad_output.numel()); TORCH_CHECK(accum_dim_sum == global_dim_sum); })); return sharded_grad_output; } Tensor recat_embedding_grad_output_mixed_D_batch_cuda( const Tensor& grad_output, // [B_local][Sum_T_global(D)] const Tensor& dim_sum_per_rank, const Tensor& cumsum_dim_sum_per_rank) { TORCH_CHECK(grad_output.is_contiguous()); at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(grad_output.get_device()); const auto B_local = grad_output.size(0); Tensor sharded_grad_output = at::empty({grad_output.numel()}, grad_output.options()); const auto dim_num = dim_sum_per_rank.size(0); const auto dim_sum = grad_output.size(1); const dim3 threads( fbgemm_gpu::kWarpSize, fbgemm_gpu::kMaxThreads / fbgemm_gpu::kWarpSize); const dim3 blocks(fbgemm_gpu::div_round_up( (B_local * dim_num), fbgemm_gpu::kMaxThreads / fbgemm_gpu::kWarpSize)); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_output.type(), "recat_embedding_gradients", ([&] { recat_copy_async_kernel<scalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( 
dim_sum_per_rank.data_ptr<int64_t>(), cumsum_dim_sum_per_rank.data_ptr<int64_t>(), grad_output.data_ptr<scalar_t>(), sharded_grad_output.data_ptr<scalar_t>(), dim_num, B_local, dim_sum); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); return sharded_grad_output; } } // namespace at
1c87b3da54fd1dc0eecd11f17d259e97b85c6a53.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { template<typename scalar_t> struct CompareGTFunctor { __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const { return a > b; } }; void gt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() { gpu_kernel_with_scalars(iter, CompareGTFunctor<scalar_t>()); }); } REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda); }} // namespace at::native
1c87b3da54fd1dc0eecd11f17d259e97b85c6a53.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/BinaryOps.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> // NOTE: CUDA on Windows requires that the enclosing function // of a __device__ lambda not have internal linkage. namespace at { namespace native { template<typename scalar_t> struct CompareGTFunctor { __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const { return a > b; } }; void gt_kernel_cuda(TensorIteratorBase& iter) { AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() { gpu_kernel_with_scalars(iter, CompareGTFunctor<scalar_t>()); }); } REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda); }} // namespace at::native
01573fbcc713dd1f7de80ed742fd2baeba51fc15.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" // Like printf, but red. Limited to 1000 characters. void red_printf(const char *format, ...) { #define RED_LIM 1000 va_list args; int i; char buf1[RED_LIM], buf2[RED_LIM]; memset(buf1, 0, RED_LIM); memset(buf2, 0, RED_LIM); va_start(args, format); // Marshal the stuff to print in a buffer vsnprintf(buf1, RED_LIM, format, args); // Probably a bad check for buffer overflow for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) { assert(buf1[i] == 0); } // Add markers for red color and reset color snprintf(buf2, 1000, "\033[31m%s\033[0m", buf1); // Probably another bad check for buffer overflow for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) { assert(buf2[i] == 0); } printf("%s", buf2); va_end(args); } void printDeviceProperties() { struct hipDeviceProp_t deviceProp; int ret = hipGetDeviceProperties(&deviceProp, 0); CPE(ret != hipSuccess, "Get Device Properties failed\n"); printf("\n=================DEVICE PROPERTIES=================\n"); printf("\tDevice name: %s\n", deviceProp.name); printf("\tTotal global memory: %lu bytes\n", deviceProp.totalGlobalMem); printf("\tWarp size: %d\n", deviceProp.warpSize); printf("\tCompute capability: %d.%d\n", deviceProp.major, deviceProp.minor); printf("\tMulti-processor count: %d\n", deviceProp.multiProcessorCount); printf("\tThreads per multi-processor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf("\n"); } // Returns when all N elements in A are non-zero void waitForNonZero(volatile int *A, int N) { int i, turns = 0; while(1) { int allNonZero = 1; for(i = 0; i < N; i ++) { if(A[i] == 0) { allNonZero = 0; } } if(allNonZero) { return; } turns ++; if(turns > 1000000) { printf("Waiting for non-zero ...\n"); turns = 0; } } } /** < Useful for sorting an array of doubles */ int cmpfunc (const void *a, const void *b) { double a_d = *(double *) a; double b_d = *(double *) b; if(a_d > b_d) { return 1; } else if(a_d < b_d) { return -1; } else { return 0; } } 
double get_timespec_us(struct timespec start, struct timespec end) { double ret = (double) (end.tv_nsec - start.tv_nsec) / 1000 + (end.tv_sec - start.tv_sec) * 1000000; return ret; }
01573fbcc713dd1f7de80ed742fd2baeba51fc15.cu
#include "common.h" // Like printf, but red. Limited to 1000 characters. void red_printf(const char *format, ...) { #define RED_LIM 1000 va_list args; int i; char buf1[RED_LIM], buf2[RED_LIM]; memset(buf1, 0, RED_LIM); memset(buf2, 0, RED_LIM); va_start(args, format); // Marshal the stuff to print in a buffer vsnprintf(buf1, RED_LIM, format, args); // Probably a bad check for buffer overflow for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) { assert(buf1[i] == 0); } // Add markers for red color and reset color snprintf(buf2, 1000, "\033[31m%s\033[0m", buf1); // Probably another bad check for buffer overflow for(i = RED_LIM - 1; i >= RED_LIM - 50; i --) { assert(buf2[i] == 0); } printf("%s", buf2); va_end(args); } void printDeviceProperties() { struct cudaDeviceProp deviceProp; int ret = cudaGetDeviceProperties(&deviceProp, 0); CPE(ret != cudaSuccess, "Get Device Properties failed\n"); printf("\n=================DEVICE PROPERTIES=================\n"); printf("\tDevice name: %s\n", deviceProp.name); printf("\tTotal global memory: %lu bytes\n", deviceProp.totalGlobalMem); printf("\tWarp size: %d\n", deviceProp.warpSize); printf("\tCompute capability: %d.%d\n", deviceProp.major, deviceProp.minor); printf("\tMulti-processor count: %d\n", deviceProp.multiProcessorCount); printf("\tThreads per multi-processor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf("\n"); } // Returns when all N elements in A are non-zero void waitForNonZero(volatile int *A, int N) { int i, turns = 0; while(1) { int allNonZero = 1; for(i = 0; i < N; i ++) { if(A[i] == 0) { allNonZero = 0; } } if(allNonZero) { return; } turns ++; if(turns > 1000000) { printf("Waiting for non-zero ...\n"); turns = 0; } } } /** < Useful for sorting an array of doubles */ int cmpfunc (const void *a, const void *b) { double a_d = *(double *) a; double b_d = *(double *) b; if(a_d > b_d) { return 1; } else if(a_d < b_d) { return -1; } else { return 0; } } double get_timespec_us(struct timespec start, struct timespec 
end) { double ret = (double) (end.tv_nsec - start.tv_nsec) / 1000 + (end.tv_sec - start.tv_sec) * 1000000; return ret; }
6a9bffbac2256583542a1279143627892c4cda4c.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <rocblas.h> #include <cusolverDn.h> #include "../cublasHelper.h" #include <exceptions/cuda_exception.h> #include <helpers/logger.h> #include <execution/AffinityManager.h> #include "config.h" #ifdef HAVE_CUDNN #include <cudnn.h> #endif namespace nd4j { std::mutex CublasHelper::_mutex; static void* handle_() { auto _handle = new hipblasHandle_t(); auto status = hipblasCreate(_handle); // initialize CUBLAS context if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle creation failed !", status); return reinterpret_cast<void *>(_handle); } static void* solver_() { auto cusolverH = new hipsolverDnHandle_t(); auto status = hipsolverDnCreate(cusolverH); if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("cuSolver handle creation failed !", status); return cusolverH; } static void* cudnn_() { #ifdef HAVE_CUDNN auto cudnnH = new cudnnHandle_t(); auto status = cudnnCreate(cudnnH); if (status != CUDNN_STATUS_SUCCESS) throw cuda_exception::build("cuDNN handle creation failed !", status); return cudnnH; #endif return nullptr; } static void destroyHandle_(void* handle) { auto ch = 
reinterpret_cast<hipblasHandle_t *>(handle); auto status = hipblasDestroy(*ch); if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle destruction failed !", status); delete ch; } CublasHelper::CublasHelper() { //nd4j_printf("Initializing cuBLAS\n",""); auto numDevices = AffinityManager::numberOfDevices(); auto currentDevice = AffinityManager::currentDeviceId(); _cache.resize(numDevices); _solvers.resize(numDevices); _cudnn.resize(numDevices); for (int e = 0; e < numDevices; e++) { AffinityManager::setCurrentNativeDevice(e); _cache[e] = handle_(); _solvers[e] = solver_(); _cudnn[e] = cudnn_(); } // don't forget to restore back original device AffinityManager::setCurrentNativeDevice(currentDevice); } CublasHelper::~CublasHelper() { nd4j_printf("Releasing cuBLAS\n",""); auto numDevices = AffinityManager::numberOfDevices(); for (int e = 0; e < numDevices; e++) destroyHandle_(_cache[e]); } CublasHelper* CublasHelper::getInstance() { _mutex.lock(); if (!_INSTANCE) _INSTANCE = new nd4j::CublasHelper(); _mutex.unlock(); return _INSTANCE; } void* CublasHelper::cudnn() { auto deviceId = AffinityManager::currentDeviceId(); if (deviceId < 0 || deviceId > _cudnn.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _cudnn[deviceId]; } void* CublasHelper::handle() { auto deviceId = AffinityManager::currentDeviceId(); return handle(deviceId); } void* CublasHelper::solver() { auto deviceId = AffinityManager::currentDeviceId(); if (deviceId < 0 || deviceId > _solvers.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _solvers[deviceId]; } void* CublasHelper::handle(int deviceId) { if (deviceId < 0 || deviceId > _cache.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _cache[deviceId]; } nd4j::CublasHelper* nd4j::CublasHelper::_INSTANCE = 0; }
6a9bffbac2256583542a1279143627892c4cda4c.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author raver119@gmail.com // #include <cublas_v2.h> #include <cusolverDn.h> #include "../cublasHelper.h" #include <exceptions/cuda_exception.h> #include <helpers/logger.h> #include <execution/AffinityManager.h> #include "config.h" #ifdef HAVE_CUDNN #include <cudnn.h> #endif namespace nd4j { std::mutex CublasHelper::_mutex; static void* handle_() { auto _handle = new cublasHandle_t(); auto status = cublasCreate_v2(_handle); // initialize CUBLAS context if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle creation failed !", status); return reinterpret_cast<void *>(_handle); } static void* solver_() { auto cusolverH = new cusolverDnHandle_t(); auto status = cusolverDnCreate(cusolverH); if (status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("cuSolver handle creation failed !", status); return cusolverH; } static void* cudnn_() { #ifdef HAVE_CUDNN auto cudnnH = new cudnnHandle_t(); auto status = cudnnCreate(cudnnH); if (status != CUDNN_STATUS_SUCCESS) throw cuda_exception::build("cuDNN handle creation failed !", status); return cudnnH; #endif return nullptr; } static void destroyHandle_(void* handle) { auto ch = reinterpret_cast<cublasHandle_t *>(handle); auto status = 
cublasDestroy_v2(*ch); if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("cuBLAS handle destruction failed !", status); delete ch; } CublasHelper::CublasHelper() { //nd4j_printf("Initializing cuBLAS\n",""); auto numDevices = AffinityManager::numberOfDevices(); auto currentDevice = AffinityManager::currentDeviceId(); _cache.resize(numDevices); _solvers.resize(numDevices); _cudnn.resize(numDevices); for (int e = 0; e < numDevices; e++) { AffinityManager::setCurrentNativeDevice(e); _cache[e] = handle_(); _solvers[e] = solver_(); _cudnn[e] = cudnn_(); } // don't forget to restore back original device AffinityManager::setCurrentNativeDevice(currentDevice); } CublasHelper::~CublasHelper() { nd4j_printf("Releasing cuBLAS\n",""); auto numDevices = AffinityManager::numberOfDevices(); for (int e = 0; e < numDevices; e++) destroyHandle_(_cache[e]); } CublasHelper* CublasHelper::getInstance() { _mutex.lock(); if (!_INSTANCE) _INSTANCE = new nd4j::CublasHelper(); _mutex.unlock(); return _INSTANCE; } void* CublasHelper::cudnn() { auto deviceId = AffinityManager::currentDeviceId(); if (deviceId < 0 || deviceId > _cudnn.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _cudnn[deviceId]; } void* CublasHelper::handle() { auto deviceId = AffinityManager::currentDeviceId(); return handle(deviceId); } void* CublasHelper::solver() { auto deviceId = AffinityManager::currentDeviceId(); if (deviceId < 0 || deviceId > _solvers.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _solvers[deviceId]; } void* CublasHelper::handle(int deviceId) { if (deviceId < 0 || deviceId > _cache.size()) throw cuda_exception::build("requested deviceId doesn't look valid", deviceId); return _cache[deviceId]; } nd4j::CublasHelper* nd4j::CublasHelper::_INSTANCE = 0; }
ffd8e7e1a29db7f975cc676a78a47a84e644e4c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author sgazeos@gmail.com // #include <ops/declarable/helpers/nth_element.h> #include <helpers/TAD.h> #include <helpers/ShapeUtils.h> #include <helpers/PointersManager.h> #include <legacy/NativeOps.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { template <typename T> static __global__ void fillUpElementKernel(void* outputBuffer, Nd4jLong const* outputShapeInfo, void* inputBuffer, Nd4jLong const* inputShapeInfo, Nd4jLong const* pTadShape, Nd4jLong const* pTadOffsets, Nd4jLong n) { __shared__ Nd4jLong bufferLength; auto z = reinterpret_cast<T*>(outputBuffer); auto x = reinterpret_cast<T*>(inputBuffer); if (threadIdx.x == 0) bufferLength = shape::length(outputShapeInfo); __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (int t = tid; t < bufferLength; t += step) { auto tX = x + pTadOffsets[t]; z[shape::getIndexOffset(t, outputShapeInfo)] = tX[shape::getIndexOffset(n, pTadShape)]; //tX]; } } template <typename T> void nthElementFunctor_(sd::LaunchContext * 
context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) { NDArray::prepareSpecialUse({output}, {input}); NDArray sortedVals(*input); Nd4jPointer params[2]; params[0] = context; params[1] = context->getCudaStream(); // Nth element in sorted sequence : basic algorithm sort and retrieve nth element in sorted if (input->isVector()) { sort(params, nullptr, sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), reverse); hipMemcpy(reinterpret_cast<T*>(output->specialBuffer()), reinterpret_cast<T*>(sortedVals.specialBuffer()) + n, sizeof(T), hipMemcpyDeviceToDevice); } else { // rank greater than 1 std::vector<int> lastDims({input->rankOf() - 1});// = ShapeUtils::evalDimsToExclude(input->rankOf(), {input->rankOf() - 1}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(sortedVals.shapeInfo(), lastDims); auto pTadShape = packX.specialShapeInfo(); auto pTadShapeH = packX.primaryShapeInfo(); auto pTadOffsets = packX.specialOffsets(); sortTad(params, sortedVals.buffer(), sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), lastDims.data(), lastDims.size(), pTadShape, pTadOffsets, reverse); sortedVals.tickWriteDevice(); sortedVals.syncToHost(); auto stream = context->getCudaStream(); hipLaunchKernelGGL(( fillUpElementKernel<T>), dim3(32), dim3(64), 1024, *stream, output->specialBuffer(), output->specialShapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), pTadShape, pTadOffsets, n); } NDArray::registerSpecialUse({output}, {input}); } void nthElementFunctor(sd::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) { BUILD_SINGLE_SELECTOR(input->dataType(), nthElementFunctor_, (context, input, n, output, reverse), LIBND4J_TYPES); } } } }
ffd8e7e1a29db7f975cc676a78a47a84e644e4c9.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author sgazeos@gmail.com // #include <ops/declarable/helpers/nth_element.h> #include <helpers/TAD.h> #include <helpers/ShapeUtils.h> #include <helpers/PointersManager.h> #include <legacy/NativeOps.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { template <typename T> static __global__ void fillUpElementKernel(void* outputBuffer, Nd4jLong const* outputShapeInfo, void* inputBuffer, Nd4jLong const* inputShapeInfo, Nd4jLong const* pTadShape, Nd4jLong const* pTadOffsets, Nd4jLong n) { __shared__ Nd4jLong bufferLength; auto z = reinterpret_cast<T*>(outputBuffer); auto x = reinterpret_cast<T*>(inputBuffer); if (threadIdx.x == 0) bufferLength = shape::length(outputShapeInfo); __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (int t = tid; t < bufferLength; t += step) { auto tX = x + pTadOffsets[t]; z[shape::getIndexOffset(t, outputShapeInfo)] = tX[shape::getIndexOffset(n, pTadShape)]; //tX]; } } template <typename T> void nthElementFunctor_(sd::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) { 
NDArray::prepareSpecialUse({output}, {input}); NDArray sortedVals(*input); Nd4jPointer params[2]; params[0] = context; params[1] = context->getCudaStream(); // Nth element in sorted sequence : basic algorithm sort and retrieve nth element in sorted if (input->isVector()) { sort(params, nullptr, sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), reverse); cudaMemcpy(reinterpret_cast<T*>(output->specialBuffer()), reinterpret_cast<T*>(sortedVals.specialBuffer()) + n, sizeof(T), cudaMemcpyDeviceToDevice); } else { // rank greater than 1 std::vector<int> lastDims({input->rankOf() - 1});// = ShapeUtils::evalDimsToExclude(input->rankOf(), {input->rankOf() - 1}); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(sortedVals.shapeInfo(), lastDims); auto pTadShape = packX.specialShapeInfo(); auto pTadShapeH = packX.primaryShapeInfo(); auto pTadOffsets = packX.specialOffsets(); sortTad(params, sortedVals.buffer(), sortedVals.shapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), lastDims.data(), lastDims.size(), pTadShape, pTadOffsets, reverse); sortedVals.tickWriteDevice(); sortedVals.syncToHost(); auto stream = context->getCudaStream(); fillUpElementKernel<T><<<32, 64, 1024, *stream>>>(output->specialBuffer(), output->specialShapeInfo(), sortedVals.specialBuffer(), sortedVals.specialShapeInfo(), pTadShape, pTadOffsets, n); } NDArray::registerSpecialUse({output}, {input}); } void nthElementFunctor(sd::LaunchContext * context, NDArray* input, Nd4jLong n, NDArray* output, bool reverse) { BUILD_SINGLE_SELECTOR(input->dataType(), nthElementFunctor_, (context, input, n, output, reverse), LIBND4J_TYPES); } } } }
363c5d315e6cd345dec3c911d97a12bbc8bdcf49.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <opencv2/highgui/highgui.hpp> #include "gcube.h" #include "gpu_util.h" gcube::gcube(void) { this->d_pixels = NULL; this->create(0, 0, 0, gfill::none); } gcube::gcube(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) { this->d_pixels = NULL; this->create(n_rows, n_cols, n_slices, fill_type); } gcube::gcube(const gcube &gpucube) { this->d_pixels = NULL; this->copy(gpucube); } gcube::gcube(const gcube *gpucube) { this->d_pixels = NULL; this->copy(*gpucube); } gcube::gcube(const std::initializer_list<float> &list) { this->d_pixels = NULL; this->create(list); } gcube::gcube(const std::initializer_list< std::initializer_list<float> > &list) { this->d_pixels = NULL; this->create(list); } gcube::gcube(const std::initializer_list< std::initializer_list< std::initializer_list<float> > > &list) { this->d_pixels = NULL; this->create(list); } gcube::~gcube(void) { this->destroy(); } __global__ void GPU_map_id(float *F, size_t n_elems) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n_elems) { return; } F[idx] = idx; } __global__ void GPU_map_assign(float *F, float val, size_t n_elems) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n_elems) { return; } F[idx] = val; } void gcube::create(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) { this->destroy(); this->n_rows = n_rows; this->n_cols = n_cols; this->n_slices = n_slices; this->n_elem = n_rows * n_cols * n_slices; if (this->n_elem != 0) { checkCudaErrors(hipMalloc(&this->d_pixels, this->n_elem * sizeof(float))); switch (fill_type) { case gfill::none: break; case gfill::zeros: checkCudaErrors(hipMemset(this->d_pixels, 0, this->n_elem * sizeof(float))); break; case gfill::ones: hipLaunchKernelGGL(( GPU_map_assign), dim3((this->n_elem-1) / 128 + 1), dim3(128), 0, 0, this->d_pixels, 1, this->n_elem); checkCudaErrors(hipGetLastError()); break; case gfill::linspace: 
hipLaunchKernelGGL(( GPU_map_id), dim3((this->n_elem-1) / 128 + 1), dim3(128), 0, 0, this->d_pixels, this->n_elem); checkCudaErrors(hipGetLastError()); default: break; } } } void gcube::create(const std::initializer_list<float> &list) { int n_rows = list.size(); this->create(n_rows, 1, 1, gfill::none); if (this->n_elem == 0) { return; } float *data = new float[this->n_elem]; int i = 0; for (const float &f : list) { data[i] = f; i++; } checkCudaErrors(hipMemcpy(this->d_pixels, data, this->n_elem * sizeof(float), hipMemcpyHostToDevice)); delete data; } void gcube::create(const std::initializer_list< std::initializer_list<float> > &list) { int n_rows = list.size(); int n_cols = (n_rows != 0) ? list.begin()->size() : 0; this->create(n_rows, n_cols, 1, gfill::none); if (this->n_elem == 0) { return; } float *data = new float[this->n_elem]; int i = 0; for (const std::initializer_list<float> &fl : list) { int j = 0; for (const float &f : fl) { data[IJ2C(i, j, n_rows)] = f; j++; } i++; } checkCudaErrors(hipMemcpy(this->d_pixels, data, this->n_elem * sizeof(float), hipMemcpyHostToDevice)); delete data; } void gcube::create(const std::initializer_list< std::initializer_list< std::initializer_list<float> > > &list) { int n_rows = list.size(); int n_cols = (n_rows != 0) ? list.begin()->size() : 0; int n_slices = (n_cols != 0) ? 
list.begin()->begin()->size() : 0; this->create(n_rows, n_cols, n_slices, gfill::none); if (this->n_elem == 0) { return; } float *data = new float[this->n_elem]; int i = 0; for (const std::initializer_list< std::initializer_list<float> > &fll : list) { int j = 0; for (const std::initializer_list<float> &fl : fll) { int k = 0; for (const float &f : fl) { data[IJK2C(i, j, k, n_rows, n_cols)] = f; k++; } j++; } i++; } checkCudaErrors(hipMemcpy(this->d_pixels, data, this->n_elem * sizeof(float), hipMemcpyHostToDevice)); delete data; } void gcube::destroy(void) { if (this->d_pixels) { checkCudaErrors(hipFree(this->d_pixels)); this->d_pixels = NULL; } } // OPERATORS void gcube::set(float v, size_t i, size_t j, size_t k) { checkCudaErrors(hipMemcpy(&this->d_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)], &v, sizeof(float), hipMemcpyHostToDevice)); } float gcube::get(size_t i, size_t j, size_t k) { float v; checkCudaErrors(hipMemcpy(&v, &this->d_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)], sizeof(float), hipMemcpyDeviceToHost)); return v; } gcube &gcube::operator=(const gcube &gpucube) { this->copy(gpucube); return *this; } gcube &gcube::operator+=(const float &f) { dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_addI), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return *this; } gcube &gcube::operator-=(const float &f) { dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_subI), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return *this; } gcube &gcube::operator*=(const float &f) { dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_mulI), dim3(gridSize), 
dim3(blockSize), 0, 0, this->d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return *this; } gcube &gcube::operator/=(const float &f) { dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_divI), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return *this; } gcube &gcube::operator+=(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_add), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return *this; } gcube &gcube::operator-=(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_sub), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return *this; } gcube &gcube::operator%=(const gcube &other) { // schur product assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_mul), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return *this; } gcube &gcube::operator/=(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_div), dim3(gridSize), 
dim3(blockSize), 0, 0, this->d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return *this; } gcube &gcube::operator*=(const gcube &other) { gcube G = (*this) * other; this->destroy(); this->d_pixels = G.d_pixels; this->n_rows = G.n_rows; this->n_cols = G.n_cols; this->n_slices = G.n_slices; this->n_elem = G.n_elem; return *this; } gcube gcube::operator+(const float &f) { gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_addI), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return G; } gcube gcube::operator-(const float &f) { gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_subI), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return G; } gcube gcube::operator*(const float &f) { gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_mulI), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return G; } gcube gcube::operator/(const float &f) { gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_divI), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return G; } gcube gcube::operator+(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); 
gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_add), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return G; } gcube gcube::operator-(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); hipLaunchKernelGGL(( GPU_sub), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return G; } gcube gcube::operator%(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, 1); hipLaunchKernelGGL(( GPU_mul), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return G; } gcube gcube::operator/(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16 + 1, 1); hipLaunchKernelGGL(( GPU_div), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(hipGetLastError()); return G; } gcube gcube::operator*(const gcube &other) { assert(this->n_cols == other.n_rows); gcube G(this->n_cols, this->n_rows * other.n_cols, 1, gfill::none); dim3 blockSize(8, 8, 8); dim3 gridSize((this->n_rows-1)/8+1, (other.n_cols-1)/8+1, (this->n_cols-1)/8+1); // set up the matrices (map mult) 
hipLaunchKernelGGL(( GPU_mmul), dim3(gridSize), dim3(blockSize), 0, 0, G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, other.n_cols, this->n_cols); checkCudaErrors(hipGetLastError()); // sum up each column blockSize.x = 128; blockSize.y = 1; blockSize.z = 1; gridSize.x = (G.n_rows-1)/128+1; gridSize.y = G.n_cols; gridSize.z = 1; for (int i = 0; (size_t)(1 << i) < G.n_rows; i += 8) { hipLaunchKernelGGL(( GPU_sum), dim3(gridSize), dim3(blockSize), sizeof(float) * 128, 0, G.d_pixels, G.d_pixels, G.n_rows, G.n_cols, 128, i); checkCudaErrors(hipGetLastError()); blockSize.x = MIN(gridSize.x, 128); gridSize.x = (blockSize.x-1)/128+1; } blockSize.x = 128; gridSize.x = (G.n_rows-1)/128+1; gridSize.y = 1; gcube F(this->n_rows * other.n_cols, 1, 1, gfill::none); hipLaunchKernelGGL(( GPU_copyRow), dim3(gridSize), dim3(blockSize), 0, 0, F.d_pixels, G.d_pixels, F.n_rows, 0); checkCudaErrors(hipGetLastError()); return F; } // MEMORY void gcube::copy(const gcube &gpucube) { this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none); checkCudaErrors(hipMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToDevice)); } /*void gcube::submatCopy(const gcube &gpucube, int x1, int x2, int y1, int y2) { this-> }*/ void gcube::load(const std::string &fname) { // change this->create(cv::imread(fname)); } void gcube::save(const std::string &fname) { // change cv::imwrite(fname, this->cv_img()); } // Specific OpenCV interaction (to make sure that they are backwards compatible) gcube::gcube(cv::Mat &cvMat) { this->d_pixels = NULL; this->create(cvMat); } __global__ void GPU_cv_img2gcube(float *dst, unsigned char *src, int dst_n_rows, int dst_n_cols, int src_n_rows, int src_n_cols, int n_slices, int ioffset, int joffset) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; if (i >= dst_n_rows || j >= dst_n_cols || k >= n_slices) { return; } 
dst[IJK2C(i, j, n_slices-k-1, dst_n_rows, dst_n_cols)] = ((float)src[IJK2C(k, j+joffset, i+ioffset, n_slices, src_n_cols)]) / 255.0; } void gcube::create(const cv::Mat &cvMat, bool remalloc) { if (remalloc) { this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none); } else { assert(cvMat.rows * cvMat.cols * cvMat.channels() == this->n_elem && this->d_pixels != NULL); } if (this->n_elem == 0) { return; } // copy to memory unsigned char *dimg; checkCudaErrors(hipMalloc(&dimg, sizeof(unsigned char) * this->n_elem)); checkCudaErrors(hipMemcpy(dimg, cvMat.data, sizeof(unsigned char) * this->n_elem, hipMemcpyHostToDevice)); // reformat dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, this->n_slices); hipLaunchKernelGGL(( GPU_cv_img2gcube), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, dimg, this->n_rows, this->n_cols, this->n_rows, this->n_cols, this->n_slices, 0, 0); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(dimg)); } void gcube::create(const cv::Mat &cvMat, int i1, int i2, int j1, int j2, bool remalloc) { assert(i1 <= i2 && j1 <= j2 && j2 <= cvMat.cols && i2 <= cvMat.rows); int di = i2 - i1; int dj = j2 - j1; if (remalloc) { this->create(di, dj, cvMat.channels(), gfill::none); } else { assert(di * dj * cvMat.channels() == this->n_elem && this->d_pixels != NULL); } if (this->n_elem == 0) { return; } // copy to memory size_t n_elem = cvMat.rows * cvMat.cols * cvMat.channels(); unsigned char *dimg; checkCudaErrors(hipMalloc(&dimg, sizeof(unsigned char) * n_elem)); checkCudaErrors(hipMemcpy(dimg, cvMat.data, sizeof(unsigned char) * n_elem, hipMemcpyHostToDevice)); // reformat dim3 blockSize(16, 16, 1); dim3 gridSize((di-1)/16+1, (dj-1)/16+1, this->n_slices); hipLaunchKernelGGL(( GPU_cv_img2gcube), dim3(gridSize), dim3(blockSize), 0, 0, this->d_pixels, dimg, di, dj, cvMat.rows, cvMat.cols, this->n_slices, i1, j1); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipFree(dimg)); } /*static int 
limit(int x, int a, int b) { if (x < a) { return a; } else if (x > b) { return b; } else { return x; } }*/ __global__ void GPU_gcube2cv_img(unsigned char *dst, float *src, int n_rows, int n_cols, int n_slices) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; if (i >= n_rows || j >= n_cols || k >= n_slices) { return; } dst[IJK2C(k, j, i, n_slices, n_cols)] = (unsigned char)(src[IJK2C(i, j, n_slices-k-1, n_rows, n_cols)] * 255.0); } cv::Mat gcube::cv_img(void) { if (this->n_elem == 0) { return cv::Mat(0, 0, CV_8UC1); } cv::Mat cv_image(this->n_rows, this->n_cols, (this->n_slices == 3) ? CV_8UC3 : CV_8UC1); // reformat unsigned char *dimg; checkCudaErrors(hipMalloc(&dimg, sizeof(unsigned char) * this->n_elem)); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, this->n_slices); hipLaunchKernelGGL(( GPU_gcube2cv_img), dim3(gridSize), dim3(blockSize), 0, 0, dimg, this->d_pixels, this->n_rows, this->n_cols, this->n_slices); checkCudaErrors(hipGetLastError()); // place the matrix into the image checkCudaErrors(hipMemcpy(cv_image.data, dimg, sizeof(unsigned char) * this->n_elem, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(dimg)); return cv_image; } cv::Mat gcube::cv_mat(void) { cv::Mat cv_image(this->n_rows, this->n_cols, (this->n_slices == 1 ? 
CV_32F : CV_32FC3)); float *h_pixels = new float[this->n_elem]; checkCudaErrors(hipMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { if (this->n_slices == 1) { cv_image.at<float>(i, j) = h_pixels[IJ2C(i, j, this->n_rows)]; } else if (this->n_slices == 3) { cv_image.at<cv::Vec3f>(i, j) = cv::Vec3f( h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)], h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)], h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)]); } } } delete h_pixels; return cv_image; } // specific armadillo compatibility gcube::gcube(arma::vec &armaCube) { this->d_pixels = NULL; this->create(armaCube); } gcube::gcube(arma::mat &armaCube) { this->d_pixels = NULL; this->create(armaCube); } gcube::gcube(arma::cube &armaCube) { this->d_pixels = NULL; this->create(armaCube); } void gcube::create(const arma::vec &armaCube) { this->create(armaCube.n_rows, 1, 1, gfill::none); if (this->n_elem == 0) { return; } float *h_pixels = new float[this->n_elem]; for (int i = 0; i < this->n_rows; i++) { h_pixels[i] = (float)armaCube(i); } checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), hipMemcpyHostToDevice)); delete h_pixels; } void gcube::create(const arma::mat &armaCube) { this->create(armaCube.n_rows, armaCube.n_cols, 1, gfill::none); if (this->n_elem == 0) { return; } float *h_pixels = new float[this->n_elem]; for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { h_pixels[IJ2C(i, j, this->n_rows)] = (float)armaCube(i, j); } } checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), hipMemcpyHostToDevice)); delete h_pixels; } void gcube::create(const arma::cube &armaCube) { this->create(armaCube.n_rows, armaCube.n_cols, armaCube.n_slices, gfill::none); if (this->n_elem == 0) { return; } float *h_pixels = new float[this->n_elem]; for (int i = 0; i < this->n_rows; 
i++) { for (int j = 0; j < this->n_cols; j++) { for (int k = 0; k < this->n_slices; k++) { h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)armaCube(i, j, k); } } } checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), hipMemcpyHostToDevice)); delete h_pixels; } arma::cube gcube::arma_cube(void) { arma::cube ac(this->n_rows, this->n_cols, this->n_slices); float *h_pixels = new float[this->n_elem]; checkCudaErrors(hipMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { for (int k = 0; k < this->n_slices; k++) { ac(i, j, k) = h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)]; } } } delete h_pixels; return ac; } /*gcube &gcube::operator=(const cv::Mat &cvMat) { this->create(cvMat); return *this; }*/
363c5d315e6cd345dec3c911d97a12bbc8bdcf49.cu
#include <opencv2/highgui/highgui.hpp> #include "gcube.h" #include "gpu_util.h" gcube::gcube(void) { this->d_pixels = NULL; this->create(0, 0, 0, gfill::none); } gcube::gcube(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) { this->d_pixels = NULL; this->create(n_rows, n_cols, n_slices, fill_type); } gcube::gcube(const gcube &gpucube) { this->d_pixels = NULL; this->copy(gpucube); } gcube::gcube(const gcube *gpucube) { this->d_pixels = NULL; this->copy(*gpucube); } gcube::gcube(const std::initializer_list<float> &list) { this->d_pixels = NULL; this->create(list); } gcube::gcube(const std::initializer_list< std::initializer_list<float> > &list) { this->d_pixels = NULL; this->create(list); } gcube::gcube(const std::initializer_list< std::initializer_list< std::initializer_list<float> > > &list) { this->d_pixels = NULL; this->create(list); } gcube::~gcube(void) { this->destroy(); } __global__ void GPU_map_id(float *F, size_t n_elems) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n_elems) { return; } F[idx] = idx; } __global__ void GPU_map_assign(float *F, float val, size_t n_elems) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= n_elems) { return; } F[idx] = val; } void gcube::create(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) { this->destroy(); this->n_rows = n_rows; this->n_cols = n_cols; this->n_slices = n_slices; this->n_elem = n_rows * n_cols * n_slices; if (this->n_elem != 0) { checkCudaErrors(cudaMalloc(&this->d_pixels, this->n_elem * sizeof(float))); switch (fill_type) { case gfill::none: break; case gfill::zeros: checkCudaErrors(cudaMemset(this->d_pixels, 0, this->n_elem * sizeof(float))); break; case gfill::ones: GPU_map_assign<<<(this->n_elem-1) / 128 + 1, 128>>>(this->d_pixels, 1, this->n_elem); checkCudaErrors(cudaGetLastError()); break; case gfill::linspace: GPU_map_id<<<(this->n_elem-1) / 128 + 1, 128>>>(this->d_pixels, this->n_elem); checkCudaErrors(cudaGetLastError()); default: break; 
} } } void gcube::create(const std::initializer_list<float> &list) { int n_rows = list.size(); this->create(n_rows, 1, 1, gfill::none); if (this->n_elem == 0) { return; } float *data = new float[this->n_elem]; int i = 0; for (const float &f : list) { data[i] = f; i++; } checkCudaErrors(cudaMemcpy(this->d_pixels, data, this->n_elem * sizeof(float), cudaMemcpyHostToDevice)); delete data; } void gcube::create(const std::initializer_list< std::initializer_list<float> > &list) { int n_rows = list.size(); int n_cols = (n_rows != 0) ? list.begin()->size() : 0; this->create(n_rows, n_cols, 1, gfill::none); if (this->n_elem == 0) { return; } float *data = new float[this->n_elem]; int i = 0; for (const std::initializer_list<float> &fl : list) { int j = 0; for (const float &f : fl) { data[IJ2C(i, j, n_rows)] = f; j++; } i++; } checkCudaErrors(cudaMemcpy(this->d_pixels, data, this->n_elem * sizeof(float), cudaMemcpyHostToDevice)); delete data; } void gcube::create(const std::initializer_list< std::initializer_list< std::initializer_list<float> > > &list) { int n_rows = list.size(); int n_cols = (n_rows != 0) ? list.begin()->size() : 0; int n_slices = (n_cols != 0) ? 
list.begin()->begin()->size() : 0; this->create(n_rows, n_cols, n_slices, gfill::none); if (this->n_elem == 0) { return; } float *data = new float[this->n_elem]; int i = 0; for (const std::initializer_list< std::initializer_list<float> > &fll : list) { int j = 0; for (const std::initializer_list<float> &fl : fll) { int k = 0; for (const float &f : fl) { data[IJK2C(i, j, k, n_rows, n_cols)] = f; k++; } j++; } i++; } checkCudaErrors(cudaMemcpy(this->d_pixels, data, this->n_elem * sizeof(float), cudaMemcpyHostToDevice)); delete data; } void gcube::destroy(void) { if (this->d_pixels) { checkCudaErrors(cudaFree(this->d_pixels)); this->d_pixels = NULL; } } // OPERATORS void gcube::set(float v, size_t i, size_t j, size_t k) { checkCudaErrors(cudaMemcpy(&this->d_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)], &v, sizeof(float), cudaMemcpyHostToDevice)); } float gcube::get(size_t i, size_t j, size_t k) { float v; checkCudaErrors(cudaMemcpy(&v, &this->d_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)], sizeof(float), cudaMemcpyDeviceToHost)); return v; } gcube &gcube::operator=(const gcube &gpucube) { this->copy(gpucube); return *this; } gcube &gcube::operator+=(const float &f) { dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_addI<<<gridSize, blockSize>>>(this->d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return *this; } gcube &gcube::operator-=(const float &f) { dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_subI<<<gridSize, blockSize>>>(this->d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return *this; } gcube &gcube::operator*=(const float &f) { dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_mulI<<<gridSize, blockSize>>>(this->d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); 
checkCudaErrors(cudaGetLastError()); return *this; } gcube &gcube::operator/=(const float &f) { dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_divI<<<gridSize, blockSize>>>(this->d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return *this; } gcube &gcube::operator+=(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_add<<<gridSize, blockSize>>>(this->d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return *this; } gcube &gcube::operator-=(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_sub<<<gridSize, blockSize>>>(this->d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return *this; } gcube &gcube::operator%=(const gcube &other) { // schur product assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_mul<<<gridSize, blockSize>>>(this->d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return *this; } gcube &gcube::operator/=(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_div<<<gridSize, blockSize>>>(this->d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return *this; } gcube &gcube::operator*=(const gcube &other) { gcube G = (*this) * other; this->destroy(); this->d_pixels = G.d_pixels; 
this->n_rows = G.n_rows; this->n_cols = G.n_cols; this->n_slices = G.n_slices; this->n_elem = G.n_elem; return *this; } gcube gcube::operator+(const float &f) { gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_addI<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return G; } gcube gcube::operator-(const float &f) { gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_subI<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return G; } gcube gcube::operator*(const float &f) { gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_mulI<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return G; } gcube gcube::operator/(const float &f) { gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_divI<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, f, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return G; } gcube gcube::operator+(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_add<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return G; } gcube gcube::operator-(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == 
other.n_cols); gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1) / 16 + 1, (this->n_cols-1) / 16 + 1, 1); GPU_sub<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return G; } gcube gcube::operator%(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, 1); GPU_mul<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return G; } gcube gcube::operator/(const gcube &other) { assert(this->n_rows == other.n_rows && this->n_cols == other.n_cols); gcube G(this->n_rows, this->n_cols, 1, gfill::none); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16 + 1, 1); GPU_div<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, this->n_cols); checkCudaErrors(cudaGetLastError()); return G; } gcube gcube::operator*(const gcube &other) { assert(this->n_cols == other.n_rows); gcube G(this->n_cols, this->n_rows * other.n_cols, 1, gfill::none); dim3 blockSize(8, 8, 8); dim3 gridSize((this->n_rows-1)/8+1, (other.n_cols-1)/8+1, (this->n_cols-1)/8+1); // set up the matrices (map mult) GPU_mmul<<<gridSize, blockSize>>>(G.d_pixels, this->d_pixels, other.d_pixels, this->n_rows, other.n_cols, this->n_cols); checkCudaErrors(cudaGetLastError()); // sum up each column blockSize.x = 128; blockSize.y = 1; blockSize.z = 1; gridSize.x = (G.n_rows-1)/128+1; gridSize.y = G.n_cols; gridSize.z = 1; for (int i = 0; (size_t)(1 << i) < G.n_rows; i += 8) { GPU_sum<<<gridSize, blockSize, sizeof(float) * 128>>>(G.d_pixels, G.d_pixels, G.n_rows, G.n_cols, 128, i); checkCudaErrors(cudaGetLastError()); blockSize.x = MIN(gridSize.x, 128); gridSize.x = 
(blockSize.x-1)/128+1; } blockSize.x = 128; gridSize.x = (G.n_rows-1)/128+1; gridSize.y = 1; gcube F(this->n_rows * other.n_cols, 1, 1, gfill::none); GPU_copyRow<<<gridSize, blockSize>>>(F.d_pixels, G.d_pixels, F.n_rows, 0); checkCudaErrors(cudaGetLastError()); return F; } // MEMORY void gcube::copy(const gcube &gpucube) { this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none); checkCudaErrors(cudaMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToDevice)); } /*void gcube::submatCopy(const gcube &gpucube, int x1, int x2, int y1, int y2) { this-> }*/ void gcube::load(const std::string &fname) { // change this->create(cv::imread(fname)); } void gcube::save(const std::string &fname) { // change cv::imwrite(fname, this->cv_img()); } // Specific OpenCV interaction (to make sure that they are backwards compatible) gcube::gcube(cv::Mat &cvMat) { this->d_pixels = NULL; this->create(cvMat); } __global__ void GPU_cv_img2gcube(float *dst, unsigned char *src, int dst_n_rows, int dst_n_cols, int src_n_rows, int src_n_cols, int n_slices, int ioffset, int joffset) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; if (i >= dst_n_rows || j >= dst_n_cols || k >= n_slices) { return; } dst[IJK2C(i, j, n_slices-k-1, dst_n_rows, dst_n_cols)] = ((float)src[IJK2C(k, j+joffset, i+ioffset, n_slices, src_n_cols)]) / 255.0; } void gcube::create(const cv::Mat &cvMat, bool remalloc) { if (remalloc) { this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none); } else { assert(cvMat.rows * cvMat.cols * cvMat.channels() == this->n_elem && this->d_pixels != NULL); } if (this->n_elem == 0) { return; } // copy to memory unsigned char *dimg; checkCudaErrors(cudaMalloc(&dimg, sizeof(unsigned char) * this->n_elem)); checkCudaErrors(cudaMemcpy(dimg, cvMat.data, sizeof(unsigned char) * this->n_elem, cudaMemcpyHostToDevice)); // reformat dim3 
blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, this->n_slices); GPU_cv_img2gcube<<<gridSize, blockSize>>>(this->d_pixels, dimg, this->n_rows, this->n_cols, this->n_rows, this->n_cols, this->n_slices, 0, 0); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(dimg)); } void gcube::create(const cv::Mat &cvMat, int i1, int i2, int j1, int j2, bool remalloc) { assert(i1 <= i2 && j1 <= j2 && j2 <= cvMat.cols && i2 <= cvMat.rows); int di = i2 - i1; int dj = j2 - j1; if (remalloc) { this->create(di, dj, cvMat.channels(), gfill::none); } else { assert(di * dj * cvMat.channels() == this->n_elem && this->d_pixels != NULL); } if (this->n_elem == 0) { return; } // copy to memory size_t n_elem = cvMat.rows * cvMat.cols * cvMat.channels(); unsigned char *dimg; checkCudaErrors(cudaMalloc(&dimg, sizeof(unsigned char) * n_elem)); checkCudaErrors(cudaMemcpy(dimg, cvMat.data, sizeof(unsigned char) * n_elem, cudaMemcpyHostToDevice)); // reformat dim3 blockSize(16, 16, 1); dim3 gridSize((di-1)/16+1, (dj-1)/16+1, this->n_slices); GPU_cv_img2gcube<<<gridSize, blockSize>>>(this->d_pixels, dimg, di, dj, cvMat.rows, cvMat.cols, this->n_slices, i1, j1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaFree(dimg)); } /*static int limit(int x, int a, int b) { if (x < a) { return a; } else if (x > b) { return b; } else { return x; } }*/ __global__ void GPU_gcube2cv_img(unsigned char *dst, float *src, int n_rows, int n_cols, int n_slices) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int k = blockIdx.z * blockDim.z + threadIdx.z; if (i >= n_rows || j >= n_cols || k >= n_slices) { return; } dst[IJK2C(k, j, i, n_slices, n_cols)] = (unsigned char)(src[IJK2C(i, j, n_slices-k-1, n_rows, n_cols)] * 255.0); } cv::Mat gcube::cv_img(void) { if (this->n_elem == 0) { return cv::Mat(0, 0, CV_8UC1); } cv::Mat cv_image(this->n_rows, this->n_cols, (this->n_slices == 3) ? 
CV_8UC3 : CV_8UC1); // reformat unsigned char *dimg; checkCudaErrors(cudaMalloc(&dimg, sizeof(unsigned char) * this->n_elem)); dim3 blockSize(16, 16, 1); dim3 gridSize((this->n_rows-1)/16+1, (this->n_cols-1)/16+1, this->n_slices); GPU_gcube2cv_img<<<gridSize, blockSize>>>(dimg, this->d_pixels, this->n_rows, this->n_cols, this->n_slices); checkCudaErrors(cudaGetLastError()); // place the matrix into the image checkCudaErrors(cudaMemcpy(cv_image.data, dimg, sizeof(unsigned char) * this->n_elem, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(dimg)); return cv_image; } cv::Mat gcube::cv_mat(void) { cv::Mat cv_image(this->n_rows, this->n_cols, (this->n_slices == 1 ? CV_32F : CV_32FC3)); float *h_pixels = new float[this->n_elem]; checkCudaErrors(cudaMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { if (this->n_slices == 1) { cv_image.at<float>(i, j) = h_pixels[IJ2C(i, j, this->n_rows)]; } else if (this->n_slices == 3) { cv_image.at<cv::Vec3f>(i, j) = cv::Vec3f( h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)], h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)], h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)]); } } } delete h_pixels; return cv_image; } // specific armadillo compatibility gcube::gcube(arma::vec &armaCube) { this->d_pixels = NULL; this->create(armaCube); } gcube::gcube(arma::mat &armaCube) { this->d_pixels = NULL; this->create(armaCube); } gcube::gcube(arma::cube &armaCube) { this->d_pixels = NULL; this->create(armaCube); } void gcube::create(const arma::vec &armaCube) { this->create(armaCube.n_rows, 1, 1, gfill::none); if (this->n_elem == 0) { return; } float *h_pixels = new float[this->n_elem]; for (int i = 0; i < this->n_rows; i++) { h_pixels[i] = (float)armaCube(i); } checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), cudaMemcpyHostToDevice)); delete h_pixels; } void gcube::create(const 
arma::mat &armaCube) { this->create(armaCube.n_rows, armaCube.n_cols, 1, gfill::none); if (this->n_elem == 0) { return; } float *h_pixels = new float[this->n_elem]; for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { h_pixels[IJ2C(i, j, this->n_rows)] = (float)armaCube(i, j); } } checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), cudaMemcpyHostToDevice)); delete h_pixels; } void gcube::create(const arma::cube &armaCube) { this->create(armaCube.n_rows, armaCube.n_cols, armaCube.n_slices, gfill::none); if (this->n_elem == 0) { return; } float *h_pixels = new float[this->n_elem]; for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { for (int k = 0; k < this->n_slices; k++) { h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)armaCube(i, j, k); } } } checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), cudaMemcpyHostToDevice)); delete h_pixels; } arma::cube gcube::arma_cube(void) { arma::cube ac(this->n_rows, this->n_cols, this->n_slices); float *h_pixels = new float[this->n_elem]; checkCudaErrors(cudaMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < this->n_rows; i++) { for (int j = 0; j < this->n_cols; j++) { for (int k = 0; k < this->n_slices; k++) { ac(i, j, k) = h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)]; } } } delete h_pixels; return ac; } /*gcube &gcube::operator=(const cv::Mat &cvMat) { this->create(cvMat); return *this; }*/
9804bdf7d00ca5c0067e514dd5c52cf8667e06a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) { float compute_local[1]; __shared__ float A_shared[64]; __shared__ float B_shared[2048]; float A_shared_local[1]; float B_shared_local[1]; compute_local[(0)] = 0.000000e+00f; for (int k_outer = 0; k_outer < 8; ++k_outer) { __syncthreads(); #pragma unroll for (int ax2_inner = 0; ax2_inner < 2; ++ax2_inner) { A_shared[(((((int)threadIdx.x) * 2) + ax2_inner))] = ((float*)A)[((((k_outer * 64) + (((int)threadIdx.x) * 2)) + ax2_inner))]; } for (int ax1_inner = 0; ax1_inner < 32; ++ax1_inner) { #pragma unroll for (int ax2_inner1 = 0; ax2_inner1 < 2; ++ax2_inner1) { B_shared[((((ax1_inner * 64) + (((int)threadIdx.x) * 2)) + ax2_inner1))] = ((float*)B)[((((((((int)blockIdx.x) * 16384) + (ax1_inner * 512)) + (k_outer * 64)) + (((int)threadIdx.x) * 2)) + ax2_inner1))]; } } __syncthreads(); for (int k_inner = 0; k_inner < 64; ++k_inner) { A_shared_local[(0)] = A_shared[(k_inner)]; B_shared_local[(0)] = B_shared[(((((int)threadIdx.x) * 64) + k_inner))]; compute_local[(0)] = (compute_local[(0)] + (A_shared_local[(0)] * B_shared_local[(0)])); } } ((float*)compute)[(((((int)blockIdx.x) * 32) + ((int)threadIdx.x)))] = compute_local[(0)]; }
9804bdf7d00ca5c0067e514dd5c52cf8667e06a9.cu
extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) { float compute_local[1]; __shared__ float A_shared[64]; __shared__ float B_shared[2048]; float A_shared_local[1]; float B_shared_local[1]; compute_local[(0)] = 0.000000e+00f; for (int k_outer = 0; k_outer < 8; ++k_outer) { __syncthreads(); #pragma unroll for (int ax2_inner = 0; ax2_inner < 2; ++ax2_inner) { A_shared[(((((int)threadIdx.x) * 2) + ax2_inner))] = ((float*)A)[((((k_outer * 64) + (((int)threadIdx.x) * 2)) + ax2_inner))]; } for (int ax1_inner = 0; ax1_inner < 32; ++ax1_inner) { #pragma unroll for (int ax2_inner1 = 0; ax2_inner1 < 2; ++ax2_inner1) { B_shared[((((ax1_inner * 64) + (((int)threadIdx.x) * 2)) + ax2_inner1))] = ((float*)B)[((((((((int)blockIdx.x) * 16384) + (ax1_inner * 512)) + (k_outer * 64)) + (((int)threadIdx.x) * 2)) + ax2_inner1))]; } } __syncthreads(); for (int k_inner = 0; k_inner < 64; ++k_inner) { A_shared_local[(0)] = A_shared[(k_inner)]; B_shared_local[(0)] = B_shared[(((((int)threadIdx.x) * 64) + k_inner))]; compute_local[(0)] = (compute_local[(0)] + (A_shared_local[(0)] * B_shared_local[(0)])); } } ((float*)compute)[(((((int)blockIdx.x) * 32) + ((int)threadIdx.x)))] = compute_local[(0)]; }
9696ccd09bf14fe02f45309ec7e273f663aa7751.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zmgeelltmv.cu normal z -> c, Fri Jan 30 19:00:29 2015 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void cmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * dcolind, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy) { extern __shared__ magmaFloatComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = dcolind [ num_rows * n + row ]; magmaFloatComplex val = dval [ num_rows * n + row ]; if( val != 0){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaFloatComplex scalar multiplier @param[in] dval magmaFloatComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar multiplier @param[out] dy magmaFloatComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaFloatComplex alpha, magmaFloatComplex_ptr dval, magmaIndex_ptr dcolind, magmaFloatComplex_ptr dx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaFloatComplex ); // num_vecs vectors hipLaunchKernelGGL(( cmgeelltmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
9696ccd09bf14fe02f45309ec7e273f663aa7751.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zmgeelltmv.cu normal z -> c, Fri Jan 30 19:00:29 2015 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void cmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaFloatComplex alpha, magmaFloatComplex * dval, magma_index_t * dcolind, magmaFloatComplex * dx, magmaFloatComplex beta, magmaFloatComplex * dy) { extern __shared__ magmaFloatComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = dcolind [ num_rows * n + row ]; magmaFloatComplex val = dval [ num_rows * n + row ]; if( val != 0){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaFloatComplex scalar multiplier @param[in] dval magmaFloatComplex_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaFloatComplex_ptr input vector x @param[in] beta magmaFloatComplex scalar multiplier @param[out] dy magmaFloatComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaFloatComplex alpha, magmaFloatComplex_ptr dval, magmaIndex_ptr dcolind, magmaFloatComplex_ptr dx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaFloatComplex ); // num_vecs vectors cmgeelltmv_kernel<<< grid, threads, MEM_SIZE, queue >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
adcaad645d50b302b3f5a154d0c60d34a1cad8cc.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2019 XGBoost contributors */ #include <dmlc/omp.h> #include <dmlc/timer.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <vector> #include <algorithm> #include <utility> #include "xgboost/json.h" #include "xgboost/parameter.h" #include "../common/math.h" #include "../common/random.h" #if defined(__HIPCC__) #include <thrust/sort.h> #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/random/uniform_int_distribution.h> #include <thrust/random/linear_congruential_engine.h> #include <hipcub/hipcub.hpp> #include "../common/device_helpers.cuh" #endif namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) && !defined(GTEST_TEST) DMLC_REGISTRY_FILE_TAG(rank_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> { size_t num_pairsample; float fix_list_weight; // declare parameters DMLC_DECLARE_PARAMETER(LambdaRankParam) { DMLC_DECLARE_FIELD(num_pairsample).set_lower_bound(1).set_default(1) .describe("Number of pair generated for each instance."); DMLC_DECLARE_FIELD(fix_list_weight).set_lower_bound(0.0f).set_default(0.0f) .describe("Normalize the weight of each list by this value," " if equals 0, no effect will happen"); } }; #if defined(__HIPCC__) // This type sorts an array which is divided into multiple groups. 
The sorting is influenced // by the function object 'Comparator' template <typename T> class SegmentSorter { private: // Items sorted within the group dh::caching_device_vector<T> ditems_; // Original position of the items before they are sorted descendingly within its groups dh::caching_device_vector<uint32_t> doriginal_pos_; // Segments within the original list that delineates the different groups dh::caching_device_vector<uint32_t> group_segments_; // Need this on the device as it is used in the kernels dh::caching_device_vector<uint32_t> dgroups_; // Group information on device // Where did the item that was originally present at position 'x' move to after they are sorted dh::caching_device_vector<uint32_t> dindexable_sorted_pos_; // Initialize everything but the segments void Init(uint32_t num_elems) { ditems_.resize(num_elems); doriginal_pos_.resize(num_elems); thrust::sequence(doriginal_pos_.begin(), doriginal_pos_.end()); } // Initialize all with group info void Init(const std::vector<uint32_t> &groups) { uint32_t num_elems = groups.back(); this->Init(num_elems); this->CreateGroupSegments(groups); } public: // This needs to be public due to device lambda void CreateGroupSegments(const std::vector<uint32_t> &groups) { uint32_t num_elems = groups.back(); group_segments_.resize(num_elems); dgroups_ = groups; // Define the segments by assigning a group ID to each element const uint32_t *dgroups = dgroups_.data().get(); uint32_t ngroups = dgroups_.size(); auto ComputeGroupIDLambda = [=] __device__(uint32_t idx) { return dh::UpperBound(dgroups, ngroups, idx) - 1; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(num_elems), group_segments_.begin(), ComputeGroupIDLambda); } // Accessors that returns device pointer inline const T *GetItemsPtr() const { return ditems_.data().get(); } inline uint32_t GetNumItems() const { return ditems_.size(); } inline const dh::caching_device_vector<T> 
&GetItems() const { return ditems_; } inline const uint32_t *GetOriginalPositionsPtr() const { return doriginal_pos_.data().get(); } inline const dh::caching_device_vector<uint32_t> &GetOriginalPositions() const { return doriginal_pos_; } inline const dh::caching_device_vector<uint32_t> &GetGroupSegments() const { return group_segments_; } inline uint32_t GetNumGroups() const { return dgroups_.size() - 1; } inline const uint32_t *GetGroupsPtr() const { return dgroups_.data().get(); } inline const dh::caching_device_vector<uint32_t> &GetGroups() const { return dgroups_; } inline const dh::caching_device_vector<uint32_t> &GetIndexableSortedPositions() const { return dindexable_sorted_pos_; } // Sort an array that is divided into multiple groups. The array is sorted within each group. // This version provides the group information that is on the host. // The array is sorted based on an adaptable binary predicate. By default a stateless predicate // is used. template <typename Comparator = thrust::greater<T>> void SortItems(const T *ditems, uint32_t item_size, const std::vector<uint32_t> &groups, const Comparator &comp = Comparator()) { this->Init(groups); this->SortItems(ditems, item_size, group_segments_, comp); } // Sort an array that is divided into multiple groups. The array is sorted within each group. // This version provides the group information that is on the device. // The array is sorted based on an adaptable binary predicate. By default a stateless predicate // is used. template <typename Comparator = thrust::greater<T>> void SortItems(const T *ditems, uint32_t item_size, const dh::caching_device_vector<uint32_t> &group_segments, const Comparator &comp = Comparator()) { this->Init(item_size); // Sort the items that are grouped. We would like to avoid using predicates to perform the sort, // as thrust resorts to using a merge sort as opposed to a much much faster radix sort // when comparators are used. Hence, the following algorithm is used. 
This is done so that // we can grab the appropriate related values from the original list later, after the // items are sorted. // // Here is the internal representation: // dgroups_: [ 0, 3, 5, 8, 10 ] // group_segments_: 0 0 0 | 1 1 | 2 2 2 | 3 3 // doriginal_pos_: 0 1 2 | 3 4 | 5 6 7 | 8 9 // ditems_: 1 0 1 | 2 1 | 1 3 3 | 4 4 (from original items) // // Sort the items first and make a note of the original positions in doriginal_pos_ // based on the sort // ditems_: 4 4 3 3 2 1 1 1 1 0 // doriginal_pos_: 8 9 6 7 3 0 2 4 5 1 // NOTE: This consumes space, but is much faster than some of the other approaches - sorting // in kernel, sorting using predicates etc. ditems_.assign(thrust::device_ptr<const T>(ditems), thrust::device_ptr<const T>(ditems) + item_size); // Allocator to be used by sort for managing space overhead while sorting dh::XGBCachingDeviceAllocator<char> alloc; thrust::stable_sort_by_key(thrust::hip::par(alloc), ditems_.begin(), ditems_.end(), doriginal_pos_.begin(), comp); // Next, gather the segments based on the doriginal_pos_. 
This is to reflect the // holisitic item sort order on the segments // group_segments_c_: 3 3 2 2 1 0 0 1 2 0 // doriginal_pos_: 8 9 6 7 3 0 2 4 5 1 (stays the same) dh::caching_device_vector<uint32_t> group_segments_c(group_segments); thrust::gather(doriginal_pos_.begin(), doriginal_pos_.end(), group_segments.begin(), group_segments_c.begin()); // Now, sort the group segments so that you may bring the items within the group together, // in the process also noting the relative changes to the doriginal_pos_ while that happens // group_segments_c_: 0 0 0 1 1 2 2 2 3 3 // doriginal_pos_: 0 2 1 3 4 6 7 5 8 9 thrust::stable_sort_by_key(thrust::hip::par(alloc), group_segments_c.begin(), group_segments_c.end(), doriginal_pos_.begin(), thrust::less<uint32_t>()); // Finally, gather the original items based on doriginal_pos_ to sort the input and // to store them in ditems_ // doriginal_pos_: 0 2 1 3 4 6 7 5 8 9 (stays the same) // ditems_: 1 1 0 2 1 3 3 1 4 4 (from unsorted items - ditems) thrust::gather(doriginal_pos_.begin(), doriginal_pos_.end(), thrust::device_ptr<const T>(ditems), ditems_.begin()); } // Determine where an item that was originally present at position 'x' has been relocated to // after a sort. Creation of such an index has to be explicitly requested after a sort void CreateIndexableSortedPositions() { dindexable_sorted_pos_.resize(GetNumItems()); thrust::scatter(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(GetNumItems()), // Rearrange indices... 
// ...based on this map thrust::device_ptr<const uint32_t>(GetOriginalPositionsPtr()), dindexable_sorted_pos_.begin()); // Write results into this } }; // Helper functions template <typename T> XGBOOST_DEVICE __forceinline__ uint32_t CountNumItemsToTheLeftOf(const T * __restrict__ items, uint32_t n, T v) { return dh::LowerBound(items, n, v, thrust::greater<T>()); } template <typename T> XGBOOST_DEVICE __forceinline__ uint32_t CountNumItemsToTheRightOf(const T * __restrict__ items, uint32_t n, T v) { return n - dh::UpperBound(items, n, v, thrust::greater<T>()); } #endif /*! \brief helper information in a list */ struct ListEntry { /*! \brief the predict score we in the data */ bst_float pred; /*! \brief the actual label of the entry */ bst_float label; /*! \brief row index in the data matrix */ unsigned rindex; // constructor ListEntry(bst_float pred, bst_float label, unsigned rindex) : pred(pred), label(label), rindex(rindex) {} // comparator by prediction inline static bool CmpPred(const ListEntry &a, const ListEntry &b) { return a.pred > b.pred; } // comparator by label inline static bool CmpLabel(const ListEntry &a, const ListEntry &b) { return a.label > b.label; } }; /*! \brief a pair in the lambda rank */ struct LambdaPair { /*! \brief positive index: this is a position in the list */ unsigned pos_index; /*! \brief negative index: this is a position in the list */ unsigned neg_index; /*! \brief weight to be filled in */ bst_float weight; // constructor LambdaPair(unsigned pos_index, unsigned neg_index) : pos_index(pos_index), neg_index(neg_index), weight(1.0f) {} // constructor LambdaPair(unsigned pos_index, unsigned neg_index, bst_float weight) : pos_index(pos_index), neg_index(neg_index), weight(weight) {} }; class PairwiseLambdaWeightComputer { public: /*! 
* \brief get lambda weight for existing pairs - for pairwise objective * \param list a list that is sorted by pred score * \param io_pairs record of pairs, containing the pairs to fill in weights */ static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) {} static char const* Name() { return "rank:pairwise"; } #if defined(__HIPCC__) PairwiseLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const SegmentSorter<float> &segment_label_sorter) {} class PairwiseLambdaWeightMultiplier { public: // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { return 1.0f; } }; inline const PairwiseLambdaWeightMultiplier GetWeightMultiplier() const { return {}; } #endif }; #if defined(__HIPCC__) class BaseLambdaWeightMultiplier { public: BaseLambdaWeightMultiplier(const SegmentSorter<float> &segment_label_sorter, const SegmentSorter<float> &segment_pred_sorter) : dsorted_labels_(segment_label_sorter.GetItemsPtr()), dorig_pos_(segment_label_sorter.GetOriginalPositionsPtr()), dgroups_(segment_label_sorter.GetGroupsPtr()), dindexable_sorted_preds_pos_ptr_( segment_pred_sorter.GetIndexableSortedPositions().data().get()) {} protected: const float *dsorted_labels_{nullptr}; // Labels sorted within a group const uint32_t *dorig_pos_{nullptr}; // Original indices of the labels before they are sorted const uint32_t *dgroups_{nullptr}; // The group indices // Where can a prediction for a label be found in the original array, when they are sorted const uint32_t *dindexable_sorted_preds_pos_ptr_{nullptr}; }; // While computing the weight that needs to be adjusted by this ranking objective, we need // to figure out where positive and negative labels chosen earlier exists, if the group // were to be sorted by its predictions. To accommodate this, we employ the following algorithm. 
// For a given group, let's assume the following: // labels: 1 5 9 2 4 8 0 7 6 3 // predictions: 1 9 0 8 2 7 3 6 5 4 // position: 0 1 2 3 4 5 6 7 8 9 // // After label sort: // labels: 9 8 7 6 5 4 3 2 1 0 // position: 2 5 7 8 1 4 9 3 0 6 // // After prediction sort: // predictions: 9 8 7 6 5 4 3 2 1 0 // position: 1 3 5 7 8 9 6 4 0 2 // // If a sorted label at position 'x' is chosen, then we need to find out where the prediction // for this label 'x' exists, if the group were to be sorted by predictions. // We first take the sorted prediction positions: // position: 1 3 5 7 8 9 6 4 0 2 // at indices: 0 1 2 3 4 5 6 7 8 9 // // We create a sorted prediction positional array, such that value at position 'x' gives // us the position in the sorted prediction array where its related prediction lies. // dindexable_sorted_preds_pos_ptr_: 8 0 9 1 7 2 6 3 4 5 // at indices: 0 1 2 3 4 5 6 7 8 9 // Basically, swap the previous 2 arrays, sort the indices and reorder positions // for an O(1) lookup using the position where the sorted label exists. 
// // This type does that using the SegmentSorter class IndexablePredictionSorter { public: IndexablePredictionSorter(const bst_float *dpreds, const SegmentSorter<float> &segment_label_sorter) { // Sort the predictions first segment_pred_sorter_.SortItems(dpreds, segment_label_sorter.GetNumItems(), segment_label_sorter.GetGroupSegments()); // Create an index for the sorted prediction positions segment_pred_sorter_.CreateIndexableSortedPositions(); } inline const SegmentSorter<float> &GetPredictionSorter() const { return segment_pred_sorter_; } private: SegmentSorter<float> segment_pred_sorter_; // For sorting the predictions }; #endif // beta version: NDCG lambda rank class NDCGLambdaWeightComputer #if defined(__HIPCC__) : public IndexablePredictionSorter #endif { public: #if defined(__HIPCC__) // This function object computes the item's DCG value class ComputeItemDCG : public thrust::unary_function<uint32_t, float> { public: XGBOOST_DEVICE ComputeItemDCG(const float *dsorted_labels, const uint32_t *dgroups, const uint32_t *gidxs) : dsorted_labels_(dsorted_labels), dgroups_(dgroups), dgidxs_(gidxs) {} // Compute DCG for the item at 'idx' __device__ __forceinline__ float operator()(uint32_t idx) const { return ComputeItemDCGWeight(dsorted_labels_[idx], idx - dgroups_[dgidxs_[idx]]); } private: const float *dsorted_labels_{nullptr}; // Labels sorted within a group const uint32_t *dgroups_{nullptr}; // The group indices - where each group begins and ends const uint32_t *dgidxs_{nullptr}; // The group each items belongs to }; // Type containing device pointers that can be cheaply copied on the kernel class NDCGLambdaWeightMultiplier : public BaseLambdaWeightMultiplier { public: NDCGLambdaWeightMultiplier(const SegmentSorter<float> &segment_label_sorter, const NDCGLambdaWeightComputer &lwc) : BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()), dgroup_dcg_ptr_(lwc.GetGroupDcgs().data().get()) {} // Adjust the items weight by this value __device__ 
__forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { if (dgroup_dcg_ptr_[gidx] == 0.0) return 0.0f; uint32_t group_begin = dgroups_[gidx]; auto pos_lab_orig_posn = dorig_pos_[pidx]; auto neg_lab_orig_posn = dorig_pos_[nidx]; KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn); // Note: the label positive and negative indices are relative to the entire dataset. // Hence, scale them back to an index within the group auto pos_pred_pos = dindexable_sorted_preds_pos_ptr_[pos_lab_orig_posn] - group_begin; auto neg_pred_pos = dindexable_sorted_preds_pos_ptr_[neg_lab_orig_posn] - group_begin; return NDCGLambdaWeightComputer::ComputeDeltaWeight( pos_pred_pos, neg_pred_pos, static_cast<int>(dsorted_labels_[pidx]), static_cast<int>(dsorted_labels_[nidx]), dgroup_dcg_ptr_[gidx]); } private: const float *dgroup_dcg_ptr_{nullptr}; // Start address of the group DCG values }; NDCGLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const SegmentSorter<float> &segment_label_sorter) : IndexablePredictionSorter(dpreds, segment_label_sorter), dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f), weight_multiplier_(segment_label_sorter, *this) { const auto &group_segments = segment_label_sorter.GetGroupSegments(); // Compute each elements DCG values and reduce them across groups concurrently. 
auto end_range = thrust::reduce_by_key(group_segments.begin(), group_segments.end(), thrust::make_transform_iterator( // The indices need not be sequential within a group, as we care only // about the sum of items DCG values within a group segment_label_sorter.GetOriginalPositions().begin(), ComputeItemDCG(segment_label_sorter.GetItemsPtr(), segment_label_sorter.GetGroupsPtr(), group_segments.data().get())), thrust::make_discard_iterator(), // We don't care for the group indices dgroup_dcg_.begin()); // Sum of the item's DCG values in the group CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size()); } inline const dh::caching_device_vector<float> &GetGroupDcgs() const { return dgroup_dcg_; } inline const NDCGLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; } #endif static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) { std::vector<LambdaPair> &pairs = *io_pairs; float IDCG; // NOLINT { std::vector<bst_float> labels(sorted_list.size()); for (size_t i = 0; i < sorted_list.size(); ++i) { labels[i] = sorted_list[i].label; } std::stable_sort(labels.begin(), labels.end(), std::greater<bst_float>()); IDCG = ComputeGroupDCGWeight(&labels[0], labels.size()); } if (IDCG == 0.0) { for (auto & pair : pairs) { pair.weight = 0.0f; } } else { for (auto & pair : pairs) { unsigned pos_idx = pair.pos_index; unsigned neg_idx = pair.neg_index; pair.weight *= ComputeDeltaWeight(pos_idx, neg_idx, sorted_list[pos_idx].label, sorted_list[neg_idx].label, IDCG); } } } static char const* Name() { return "rank:ndcg"; } inline static bst_float ComputeGroupDCGWeight(const float *sorted_labels, uint32_t size) { double sumdcg = 0.0; for (uint32_t i = 0; i < size; ++i) { sumdcg += ComputeItemDCGWeight(sorted_labels[i], i); } return static_cast<bst_float>(sumdcg); } private: XGBOOST_DEVICE inline static bst_float ComputeItemDCGWeight(unsigned label, uint32_t idx) { return (label != 0) ? 
(((1 << label) - 1) / std::log2(static_cast<bst_float>(idx + 2))) : 0; } // Compute the weight adjustment for an item within a group: // pos_pred_pos => Where does the positive label live, had the list been sorted by prediction // neg_pred_pos => Where does the negative label live, had the list been sorted by prediction // pos_label => positive label value from sorted label list // neg_label => negative label value from sorted label list XGBOOST_DEVICE inline static bst_float ComputeDeltaWeight(uint32_t pos_pred_pos, uint32_t neg_pred_pos, int pos_label, int neg_label, float idcg) { float pos_loginv = 1.0f / std::log2(pos_pred_pos + 2.0f); float neg_loginv = 1.0f / std::log2(neg_pred_pos + 2.0f); bst_float original = ((1 << pos_label) - 1) * pos_loginv + ((1 << neg_label) - 1) * neg_loginv; float changed = ((1 << neg_label) - 1) * pos_loginv + ((1 << pos_label) - 1) * neg_loginv; bst_float delta = (original - changed) * (1.0f / idcg); if (delta < 0.0f) delta = - delta; return delta; } #if defined(__HIPCC__) dh::caching_device_vector<float> dgroup_dcg_; // This computes the adjustment to the weight const NDCGLambdaWeightMultiplier weight_multiplier_; #endif }; class MAPLambdaWeightComputer #if defined(__HIPCC__) : public IndexablePredictionSorter #endif { public: struct MAPStats { /*! \brief the accumulated precision */ float ap_acc{0.0f}; /*! * \brief the accumulated precision, * assuming a positive instance is missing */ float ap_acc_miss{0.0f}; /*! 
* \brief the accumulated precision, * assuming that one more positive instance is inserted ahead */ float ap_acc_add{0.0f}; /* \brief the accumulated positive instance count */ float hits{0.0f}; XGBOOST_DEVICE MAPStats() {} // NOLINT XGBOOST_DEVICE MAPStats(float ap_acc, float ap_acc_miss, float ap_acc_add, float hits) : ap_acc(ap_acc), ap_acc_miss(ap_acc_miss), ap_acc_add(ap_acc_add), hits(hits) {} // For prefix scan XGBOOST_DEVICE MAPStats operator +(const MAPStats &v1) const { return {ap_acc + v1.ap_acc, ap_acc_miss + v1.ap_acc_miss, ap_acc_add + v1.ap_acc_add, hits + v1.hits}; } // For test purposes - compare for equality XGBOOST_DEVICE bool operator ==(const MAPStats &rhs) const { return ap_acc == rhs.ap_acc && ap_acc_miss == rhs.ap_acc_miss && ap_acc_add == rhs.ap_acc_add && hits == rhs.hits; } }; private: template <typename T> XGBOOST_DEVICE inline static void Swap(T &v0, T &v1) { #if defined(__HIPCC__) thrust::swap(v0, v1); #else std::swap(v0, v1); #endif } /*! * \brief Obtain the delta MAP by trying to switch the positions of labels in pos_pred_pos or * neg_pred_pos when sorted by predictions * \param pos_pred_pos positive label's prediction value position when the groups prediction * values are sorted * \param neg_pred_pos negative label's prediction value position when the groups prediction * values are sorted * \param pos_label, neg_label the chosen positive and negative labels * \param p_map_stats a vector containing the accumulated precisions for each position in a list * \param map_stats_size size of the accumulated precisions vector */ XGBOOST_DEVICE inline static bst_float GetLambdaMAP( int pos_pred_pos, int neg_pred_pos, bst_float pos_label, bst_float neg_label, const MAPStats *p_map_stats, uint32_t map_stats_size) { if (pos_pred_pos == neg_pred_pos || p_map_stats[map_stats_size - 1].hits == 0) { return 0.0f; } if (pos_pred_pos > neg_pred_pos) { Swap(pos_pred_pos, neg_pred_pos); Swap(pos_label, neg_label); } bst_float original = 
p_map_stats[neg_pred_pos].ap_acc; if (pos_pred_pos != 0) original -= p_map_stats[pos_pred_pos - 1].ap_acc; bst_float changed = 0; bst_float label1 = pos_label > 0.0f ? 1.0f : 0.0f; bst_float label2 = neg_label > 0.0f ? 1.0f : 0.0f; if (label1 == label2) { return 0.0; } else if (label1 < label2) { changed += p_map_stats[neg_pred_pos - 1].ap_acc_add - p_map_stats[pos_pred_pos].ap_acc_add; changed += (p_map_stats[pos_pred_pos].hits + 1.0f) / (pos_pred_pos + 1); } else { changed += p_map_stats[neg_pred_pos - 1].ap_acc_miss - p_map_stats[pos_pred_pos].ap_acc_miss; changed += p_map_stats[neg_pred_pos].hits / (neg_pred_pos + 1); } bst_float ans = (changed - original) / (p_map_stats[map_stats_size - 1].hits); if (ans < 0) ans = -ans; return ans; } public: /* * \brief obtain preprocessing results for calculating delta MAP * \param sorted_list the list containing entry information * \param map_stats a vector containing the accumulated precisions for each position in a list */ inline static void GetMAPStats(const std::vector<ListEntry> &sorted_list, std::vector<MAPStats> *p_map_acc) { std::vector<MAPStats> &map_acc = *p_map_acc; map_acc.resize(sorted_list.size()); bst_float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0; for (size_t i = 1; i <= sorted_list.size(); ++i) { if (sorted_list[i - 1].label > 0.0f) { hit++; acc1 += hit / i; acc2 += (hit - 1) / i; acc3 += (hit + 1) / i; } map_acc[i - 1] = MAPStats(acc1, acc2, acc3, hit); } } static char const* Name() { return "rank:map"; } static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) { std::vector<LambdaPair> &pairs = *io_pairs; std::vector<MAPStats> map_stats; GetMAPStats(sorted_list, &map_stats); for (auto & pair : pairs) { pair.weight *= GetLambdaMAP(pair.pos_index, pair.neg_index, sorted_list[pair.pos_index].label, sorted_list[pair.neg_index].label, &map_stats[0], map_stats.size()); } } #if defined(__HIPCC__) MAPLambdaWeightComputer(const bst_float *dpreds, const bst_float 
*dlabels, const SegmentSorter<float> &segment_label_sorter) : IndexablePredictionSorter(dpreds, segment_label_sorter), dmap_stats_(segment_label_sorter.GetNumItems(), MAPStats()), weight_multiplier_(segment_label_sorter, *this) { this->CreateMAPStats(dlabels, segment_label_sorter); } void CreateMAPStats(const bst_float *dlabels, const SegmentSorter<float> &segment_label_sorter) { // For each group, go through the sorted prediction positions, and look up its corresponding // label from the unsorted labels (from the original label list) // For each item in the group, compute its MAP stats. // Interleave the computation of map stats amongst different groups. // First, determine postive labels in the dataset individually auto nitems = segment_label_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> dhits(nitems, 0); // Original positions of the predictions after they have been sorted const uint32_t *pred_original_pos = this->GetPredictionSorter().GetOriginalPositionsPtr(); // Unsorted labels const float *unsorted_labels = dlabels; auto DeterminePositiveLabelLambda = [=] __device__(uint32_t idx) { return (unsorted_labels[pred_original_pos[idx]] > 0.0f) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), dhits.begin(), DeterminePositiveLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the positive labels that are segmented to accumulate them. // This is required for computing the accumulated precisions const auto &group_segments = segment_label_sorter.GetGroupSegments(); // Data segmented into different groups... 
thrust::inclusive_scan_by_key(thrust::hip::par(alloc), group_segments.begin(), group_segments.end(), dhits.begin(), // Input value dhits.begin()); // In-place scan // Compute accumulated precisions for each item, assuming positive and // negative instances are missing. // But first, compute individual item precisions const auto *dgidx_arr = group_segments.data().get(); const auto *dhits_arr = dhits.data().get(); // Group info on device const uint32_t *dgroups = segment_label_sorter.GetGroupsPtr(); uint32_t ngroups = segment_label_sorter.GetNumGroups(); auto ComputeItemPrecisionLambda = [=] __device__(uint32_t idx) { if (unsorted_labels[pred_original_pos[idx]] > 0.0f) { auto idx_within_group = (idx - dgroups[dgidx_arr[idx]]) + 1; return MAPStats(static_cast<float>(dhits_arr[idx]) / idx_within_group, static_cast<float>(dhits_arr[idx] - 1) / idx_within_group, static_cast<float>(dhits_arr[idx] + 1) / idx_within_group, 1.0f); } return MAPStats(); }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), this->dmap_stats_.begin(), ComputeItemPrecisionLambda); // Lastly, compute the accumulated precisions for all the items segmented by groups. 
// The precisions are accumulated within each group thrust::inclusive_scan_by_key(thrust::hip::par(alloc), group_segments.begin(), group_segments.end(), this->dmap_stats_.begin(), // Input map stats this->dmap_stats_.begin()); // In-place scan and output here } inline const dh::caching_device_vector<MAPStats> &GetMapStats() const { return dmap_stats_; } // Type containing device pointers that can be cheaply copied on the kernel class MAPLambdaWeightMultiplier : public BaseLambdaWeightMultiplier { public: MAPLambdaWeightMultiplier(const SegmentSorter<float> &segment_label_sorter, const MAPLambdaWeightComputer &lwc) : BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()), dmap_stats_ptr_(lwc.GetMapStats().data().get()) {} // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { uint32_t group_begin = dgroups_[gidx]; uint32_t group_end = dgroups_[gidx + 1]; auto pos_lab_orig_posn = dorig_pos_[pidx]; auto neg_lab_orig_posn = dorig_pos_[nidx]; KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn); // Note: the label positive and negative indices are relative to the entire dataset. 
// Hence, scale them back to an index within the group auto pos_pred_pos = dindexable_sorted_preds_pos_ptr_[pos_lab_orig_posn] - group_begin; auto neg_pred_pos = dindexable_sorted_preds_pos_ptr_[neg_lab_orig_posn] - group_begin; return MAPLambdaWeightComputer::GetLambdaMAP( pos_pred_pos, neg_pred_pos, dsorted_labels_[pidx], dsorted_labels_[nidx], &dmap_stats_ptr_[group_begin], group_end - group_begin); } private: const MAPStats *dmap_stats_ptr_{nullptr}; // Start address of the map stats for every sorted // prediction value }; inline const MAPLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; } private: dh::caching_device_vector<MAPStats> dmap_stats_; // This computes the adjustment to the weight const MAPLambdaWeightMultiplier weight_multiplier_; #endif }; #if defined(__HIPCC__) class SortedLabelList : SegmentSorter<float> { private: const LambdaRankParam &param_; // Objective configuration public: explicit SortedLabelList(const LambdaRankParam &param) : param_(param) {} // Sort the labels that are grouped by 'groups' void Sort(const HostDeviceVector<bst_float> &dlabels, const std::vector<uint32_t> &groups) { this->SortItems(dlabels.ConstDevicePointer(), dlabels.Size(), groups); } // This kernel can only run *after* the kernel in sort is completed, as they // use the default stream template <typename LambdaWeightComputerT> void ComputeGradients(const bst_float *dpreds, // Unsorted predictions const bst_float *dlabels, // Unsorted labels const HostDeviceVector<bst_float> &weights, int iter, GradientPair *out_gpair, float weight_normalization_factor) { // Group info on device const uint32_t *dgroups = this->GetGroupsPtr(); uint32_t ngroups = this->GetNumGroups() + 1; uint32_t total_items = this->GetNumItems(); uint32_t niter = param_.num_pairsample * total_items; float fix_list_weight = param_.fix_list_weight; const uint32_t *original_pos = this->GetOriginalPositionsPtr(); uint32_t num_weights = weights.Size(); auto dweights = 
num_weights ? weights.ConstDevicePointer() : nullptr; const bst_float *sorted_labels = this->GetItemsPtr(); // This is used to adjust the weight of different elements based on the different ranking // objective function policies LambdaWeightComputerT weight_computer(dpreds, dlabels, *this); auto wmultiplier = weight_computer.GetWeightMultiplier(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each instance in the group, compute the gradient pair concurrently dh::LaunchN(device_id, niter, nullptr, [=] __device__(uint32_t idx) { // First, determine the group 'idx' belongs to uint32_t item_idx = idx % total_items; uint32_t group_idx = dh::UpperBound(dgroups, ngroups, item_idx); // Span of this group within the larger labels/predictions sorted tuple uint32_t group_begin = dgroups[group_idx - 1]; uint32_t group_end = dgroups[group_idx]; uint32_t total_group_items = group_end - group_begin; // Are the labels diverse enough? If they are all the same, then there is nothing to pick // from another group - bail sooner if (sorted_labels[group_begin] == sorted_labels[group_end - 1]) return; // Find the number of labels less than and greater than the current label // at the sorted index position item_idx uint32_t nleft = CountNumItemsToTheLeftOf( sorted_labels + group_begin, item_idx - group_begin + 1, sorted_labels[item_idx]); uint32_t nright = CountNumItemsToTheRightOf( sorted_labels + item_idx, group_end - item_idx, sorted_labels[item_idx]); // Create a minstd_rand object to act as our source of randomness thrust::minstd_rand rng((iter + 1) * 1111); rng.discard(((idx / total_items) * total_group_items) + item_idx - group_begin); // Create a uniform_int_distribution to produce a sample from outside of the // present label group thrust::uniform_int_distribution<int> dist(0, nleft + nright - 1); int sample = dist(rng); int pos_idx = -1; // Bigger label int neg_idx = -1; // Smaller label // Are we picking a sample to the left/right of the current group? 
if (sample < nleft) { // Go left pos_idx = sample + group_begin; neg_idx = item_idx; } else { pos_idx = item_idx; uint32_t items_in_group = total_group_items - nleft - nright; neg_idx = sample + items_in_group + group_begin; } // Compute and assign the gradients now const float eps = 1e-16f; bst_float p = common::Sigmoid(dpreds[original_pos[pos_idx]] - dpreds[original_pos[neg_idx]]); bst_float g = p - 1.0f; bst_float h = thrust::max(p * (1.0f - p), eps); // Rescale each gradient and hessian so that the group has a weighted constant float scale = __frcp_ru(niter / total_items); if (fix_list_weight != 0.0f) { scale *= fix_list_weight / total_group_items; } float weight = num_weights ? dweights[group_idx - 1] : 1.0f; weight *= weight_normalization_factor; weight *= wmultiplier.GetWeight(group_idx - 1, pos_idx, neg_idx); weight *= scale; // Accumulate gradient and hessian in both positive and negative indices const GradientPair in_pos_gpair(g * weight, 2.0f * weight * h); dh::AtomicAddGpair(&out_gpair[original_pos[pos_idx]], in_pos_gpair); const GradientPair in_neg_gpair(-g * weight, 2.0f * weight * h); dh::AtomicAddGpair(&out_gpair[original_pos[neg_idx]], in_neg_gpair); }); // Wait until the computations done by the kernel is complete dh::safe_cuda(hipStreamSynchronize(nullptr)); } }; #endif // objective for lambda rank template <typename LambdaWeightComputerT> class LambdaRankObj : public ObjFunction { public: void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_EQ(preds.Size(), info.labels_.Size()) << "label size predict size not match"; // quick consistency when group is not available std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? 
tgptr : info.group_ptr_; CHECK(gptr.size() != 0 && gptr.back() == info.labels_.Size()) << "group structure not consistent with #rows" << ", " << "group ponter size: " << gptr.size() << ", " << "labels size: " << info.labels_.Size() << ", " << "group pointer back: " << (gptr.size() == 0 ? 0 : gptr.back()); #if defined(__HIPCC__) // Check if we have a GPU assignment; else, revert back to CPU auto device = tparam_->gpu_id; if (device >= 0) { ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr); } else { // Revert back to CPU #endif ComputeGradientsOnCPU(preds, info, iter, out_gpair, gptr); #if defined(__HIPCC__) } #endif } const char* DefaultEvalMetric() const override { return "map"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(LambdaWeightComputerT::Name()); out["lambda_rank_param"] = Object(); for (auto const& kv : param_.__DICT__()) { out["lambda_rank_param"][kv.first] = kv.second; } } void LoadConfig(Json const& in) override { fromJson(in["lambda_rank_param"], &param_); } private: bst_float ComputeWeightNormalizationFactor(const MetaInfo& info, const std::vector<unsigned> &gptr) { const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); bst_float sum_weights = 0; for (bst_omp_uint k = 0; k < ngroup; ++k) { sum_weights += info.GetWeight(k); } return ngroup / sum_weights; } void ComputeGradientsOnCPU(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair, const std::vector<unsigned> &gptr) { LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on CPU."; bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr); const auto& preds_h = preds.HostVector(); const auto& labels = info.labels_.HostVector(); std::vector<GradientPair>& gpair = out_gpair->HostVector(); const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); out_gpair->Resize(preds.Size()); #pragma omp parallel { // parallel construct, 
declare random number generator here, so that each // thread use its own random number generator, seed by thread id and current iteration std::minstd_rand rnd((iter + 1) * 1111); std::vector<LambdaPair> pairs; std::vector<ListEntry> lst; std::vector< std::pair<bst_float, unsigned> > rec; #pragma omp for schedule(static) for (bst_omp_uint k = 0; k < ngroup; ++k) { lst.clear(); pairs.clear(); for (unsigned j = gptr[k]; j < gptr[k+1]; ++j) { lst.emplace_back(preds_h[j], labels[j], j); gpair[j] = GradientPair(0.0f, 0.0f); } std::stable_sort(lst.begin(), lst.end(), ListEntry::CmpPred); rec.resize(lst.size()); for (unsigned i = 0; i < lst.size(); ++i) { rec[i] = std::make_pair(lst[i].label, i); } std::stable_sort(rec.begin(), rec.end(), common::CmpFirst); // enumerate buckets with same label, for each item in the lst, grab another sample randomly for (unsigned i = 0; i < rec.size(); ) { unsigned j = i + 1; while (j < rec.size() && rec[j].first == rec[i].first) ++j; // bucket in [i,j), get a sample outside bucket unsigned nleft = i, nright = static_cast<unsigned>(rec.size() - j); if (nleft + nright != 0) { int nsample = param_.num_pairsample; while (nsample --) { for (unsigned pid = i; pid < j; ++pid) { unsigned ridx = std::uniform_int_distribution<unsigned>(0, nleft + nright - 1)(rnd); if (ridx < nleft) { pairs.emplace_back(rec[ridx].second, rec[pid].second, info.GetWeight(k) * weight_normalization_factor); } else { pairs.emplace_back(rec[pid].second, rec[ridx+j-i].second, info.GetWeight(k) * weight_normalization_factor); } } } } i = j; } // get lambda weight for the pairs LambdaWeightComputerT::GetLambdaWeight(lst, &pairs); // rescale each gradient and hessian so that the lst have constant weighted float scale = 1.0f / param_.num_pairsample; if (param_.fix_list_weight != 0.0f) { scale *= param_.fix_list_weight / (gptr[k + 1] - gptr[k]); } for (auto & pair : pairs) { const ListEntry &pos = lst[pair.pos_index]; const ListEntry &neg = lst[pair.neg_index]; const bst_float w 
= pair.weight * scale; const float eps = 1e-16f; bst_float p = common::Sigmoid(pos.pred - neg.pred); bst_float g = p - 1.0f; bst_float h = ::max(p * (1.0f - p), eps); // accumulate gradient and hessian in both pid, and nid gpair[pos.rindex] += GradientPair(g * w, 2.0f*w*h); gpair[neg.rindex] += GradientPair(-g * w, 2.0f*w*h); } } } } #if defined(__HIPCC__) void ComputeGradientsOnGPU(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair, const std::vector<unsigned> &gptr) { LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU."; auto device = tparam_->gpu_id; dh::safe_cuda(hipSetDevice(device)); bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr); // Set the device ID and copy them to the device out_gpair->SetDevice(device); info.labels_.SetDevice(device); preds.SetDevice(device); info.weights_.SetDevice(device); out_gpair->Resize(preds.Size()); auto d_preds = preds.ConstDevicePointer(); auto d_gpair = out_gpair->DevicePointer(); auto d_labels = info.labels_.ConstDevicePointer(); SortedLabelList slist(param_); // Sort the labels within the groups on the device slist.Sort(info.labels_, gptr); // Initialize the gradients next out_gpair->Fill(GradientPair(0.0f, 0.0f)); // Finally, compute the gradients slist.ComputeGradients<LambdaWeightComputerT> (d_preds, d_labels, info.weights_, iter, d_gpair, weight_normalization_factor); } #endif LambdaRankParam param_; }; #if !defined(GTEST_TEST) // register the objective functions DMLC_REGISTER_PARAMETER(LambdaRankParam); XGBOOST_REGISTER_OBJECTIVE(PairwiseRankObj, PairwiseLambdaWeightComputer::Name()) .describe("Pairwise rank objective.") .set_body([]() { return new LambdaRankObj<PairwiseLambdaWeightComputer>(); }); XGBOOST_REGISTER_OBJECTIVE(LambdaRankNDCG, NDCGLambdaWeightComputer::Name()) .describe("LambdaRank with NDCG as objective.") .set_body([]() { return new 
LambdaRankObj<NDCGLambdaWeightComputer>(); }); XGBOOST_REGISTER_OBJECTIVE(LambdaRankObjMAP, MAPLambdaWeightComputer::Name()) .describe("LambdaRank with MAP as objective.") .set_body([]() { return new LambdaRankObj<MAPLambdaWeightComputer>(); }); #endif } // namespace obj } // namespace xgboost
adcaad645d50b302b3f5a154d0c60d34a1cad8cc.cu
/*! * Copyright 2015-2019 XGBoost contributors */ #include <dmlc/omp.h> #include <dmlc/timer.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <vector> #include <algorithm> #include <utility> #include "xgboost/json.h" #include "xgboost/parameter.h" #include "../common/math.h" #include "../common/random.h" #if defined(__CUDACC__) #include <thrust/sort.h> #include <thrust/gather.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/random/uniform_int_distribution.h> #include <thrust/random/linear_congruential_engine.h> #include <cub/util_allocator.cuh> #include "../common/device_helpers.cuh" #endif namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) && !defined(GTEST_TEST) DMLC_REGISTRY_FILE_TAG(rank_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct LambdaRankParam : public XGBoostParameter<LambdaRankParam> { size_t num_pairsample; float fix_list_weight; // declare parameters DMLC_DECLARE_PARAMETER(LambdaRankParam) { DMLC_DECLARE_FIELD(num_pairsample).set_lower_bound(1).set_default(1) .describe("Number of pair generated for each instance."); DMLC_DECLARE_FIELD(fix_list_weight).set_lower_bound(0.0f).set_default(0.0f) .describe("Normalize the weight of each list by this value," " if equals 0, no effect will happen"); } }; #if defined(__CUDACC__) // This type sorts an array which is divided into multiple groups. 
The sorting is influenced // by the function object 'Comparator' template <typename T> class SegmentSorter { private: // Items sorted within the group dh::caching_device_vector<T> ditems_; // Original position of the items before they are sorted descendingly within its groups dh::caching_device_vector<uint32_t> doriginal_pos_; // Segments within the original list that delineates the different groups dh::caching_device_vector<uint32_t> group_segments_; // Need this on the device as it is used in the kernels dh::caching_device_vector<uint32_t> dgroups_; // Group information on device // Where did the item that was originally present at position 'x' move to after they are sorted dh::caching_device_vector<uint32_t> dindexable_sorted_pos_; // Initialize everything but the segments void Init(uint32_t num_elems) { ditems_.resize(num_elems); doriginal_pos_.resize(num_elems); thrust::sequence(doriginal_pos_.begin(), doriginal_pos_.end()); } // Initialize all with group info void Init(const std::vector<uint32_t> &groups) { uint32_t num_elems = groups.back(); this->Init(num_elems); this->CreateGroupSegments(groups); } public: // This needs to be public due to device lambda void CreateGroupSegments(const std::vector<uint32_t> &groups) { uint32_t num_elems = groups.back(); group_segments_.resize(num_elems); dgroups_ = groups; // Define the segments by assigning a group ID to each element const uint32_t *dgroups = dgroups_.data().get(); uint32_t ngroups = dgroups_.size(); auto ComputeGroupIDLambda = [=] __device__(uint32_t idx) { return dh::UpperBound(dgroups, ngroups, idx) - 1; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(num_elems), group_segments_.begin(), ComputeGroupIDLambda); } // Accessors that returns device pointer inline const T *GetItemsPtr() const { return ditems_.data().get(); } inline uint32_t GetNumItems() const { return ditems_.size(); } inline const dh::caching_device_vector<T> 
&GetItems() const { return ditems_; } inline const uint32_t *GetOriginalPositionsPtr() const { return doriginal_pos_.data().get(); } inline const dh::caching_device_vector<uint32_t> &GetOriginalPositions() const { return doriginal_pos_; } inline const dh::caching_device_vector<uint32_t> &GetGroupSegments() const { return group_segments_; } inline uint32_t GetNumGroups() const { return dgroups_.size() - 1; } inline const uint32_t *GetGroupsPtr() const { return dgroups_.data().get(); } inline const dh::caching_device_vector<uint32_t> &GetGroups() const { return dgroups_; } inline const dh::caching_device_vector<uint32_t> &GetIndexableSortedPositions() const { return dindexable_sorted_pos_; } // Sort an array that is divided into multiple groups. The array is sorted within each group. // This version provides the group information that is on the host. // The array is sorted based on an adaptable binary predicate. By default a stateless predicate // is used. template <typename Comparator = thrust::greater<T>> void SortItems(const T *ditems, uint32_t item_size, const std::vector<uint32_t> &groups, const Comparator &comp = Comparator()) { this->Init(groups); this->SortItems(ditems, item_size, group_segments_, comp); } // Sort an array that is divided into multiple groups. The array is sorted within each group. // This version provides the group information that is on the device. // The array is sorted based on an adaptable binary predicate. By default a stateless predicate // is used. template <typename Comparator = thrust::greater<T>> void SortItems(const T *ditems, uint32_t item_size, const dh::caching_device_vector<uint32_t> &group_segments, const Comparator &comp = Comparator()) { this->Init(item_size); // Sort the items that are grouped. We would like to avoid using predicates to perform the sort, // as thrust resorts to using a merge sort as opposed to a much much faster radix sort // when comparators are used. Hence, the following algorithm is used. 
This is done so that // we can grab the appropriate related values from the original list later, after the // items are sorted. // // Here is the internal representation: // dgroups_: [ 0, 3, 5, 8, 10 ] // group_segments_: 0 0 0 | 1 1 | 2 2 2 | 3 3 // doriginal_pos_: 0 1 2 | 3 4 | 5 6 7 | 8 9 // ditems_: 1 0 1 | 2 1 | 1 3 3 | 4 4 (from original items) // // Sort the items first and make a note of the original positions in doriginal_pos_ // based on the sort // ditems_: 4 4 3 3 2 1 1 1 1 0 // doriginal_pos_: 8 9 6 7 3 0 2 4 5 1 // NOTE: This consumes space, but is much faster than some of the other approaches - sorting // in kernel, sorting using predicates etc. ditems_.assign(thrust::device_ptr<const T>(ditems), thrust::device_ptr<const T>(ditems) + item_size); // Allocator to be used by sort for managing space overhead while sorting dh::XGBCachingDeviceAllocator<char> alloc; thrust::stable_sort_by_key(thrust::cuda::par(alloc), ditems_.begin(), ditems_.end(), doriginal_pos_.begin(), comp); // Next, gather the segments based on the doriginal_pos_. 
This is to reflect the // holisitic item sort order on the segments // group_segments_c_: 3 3 2 2 1 0 0 1 2 0 // doriginal_pos_: 8 9 6 7 3 0 2 4 5 1 (stays the same) dh::caching_device_vector<uint32_t> group_segments_c(group_segments); thrust::gather(doriginal_pos_.begin(), doriginal_pos_.end(), group_segments.begin(), group_segments_c.begin()); // Now, sort the group segments so that you may bring the items within the group together, // in the process also noting the relative changes to the doriginal_pos_ while that happens // group_segments_c_: 0 0 0 1 1 2 2 2 3 3 // doriginal_pos_: 0 2 1 3 4 6 7 5 8 9 thrust::stable_sort_by_key(thrust::cuda::par(alloc), group_segments_c.begin(), group_segments_c.end(), doriginal_pos_.begin(), thrust::less<uint32_t>()); // Finally, gather the original items based on doriginal_pos_ to sort the input and // to store them in ditems_ // doriginal_pos_: 0 2 1 3 4 6 7 5 8 9 (stays the same) // ditems_: 1 1 0 2 1 3 3 1 4 4 (from unsorted items - ditems) thrust::gather(doriginal_pos_.begin(), doriginal_pos_.end(), thrust::device_ptr<const T>(ditems), ditems_.begin()); } // Determine where an item that was originally present at position 'x' has been relocated to // after a sort. Creation of such an index has to be explicitly requested after a sort void CreateIndexableSortedPositions() { dindexable_sorted_pos_.resize(GetNumItems()); thrust::scatter(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(GetNumItems()), // Rearrange indices... 
// ...based on this map thrust::device_ptr<const uint32_t>(GetOriginalPositionsPtr()), dindexable_sorted_pos_.begin()); // Write results into this } }; // Helper functions template <typename T> XGBOOST_DEVICE __forceinline__ uint32_t CountNumItemsToTheLeftOf(const T * __restrict__ items, uint32_t n, T v) { return dh::LowerBound(items, n, v, thrust::greater<T>()); } template <typename T> XGBOOST_DEVICE __forceinline__ uint32_t CountNumItemsToTheRightOf(const T * __restrict__ items, uint32_t n, T v) { return n - dh::UpperBound(items, n, v, thrust::greater<T>()); } #endif /*! \brief helper information in a list */ struct ListEntry { /*! \brief the predict score we in the data */ bst_float pred; /*! \brief the actual label of the entry */ bst_float label; /*! \brief row index in the data matrix */ unsigned rindex; // constructor ListEntry(bst_float pred, bst_float label, unsigned rindex) : pred(pred), label(label), rindex(rindex) {} // comparator by prediction inline static bool CmpPred(const ListEntry &a, const ListEntry &b) { return a.pred > b.pred; } // comparator by label inline static bool CmpLabel(const ListEntry &a, const ListEntry &b) { return a.label > b.label; } }; /*! \brief a pair in the lambda rank */ struct LambdaPair { /*! \brief positive index: this is a position in the list */ unsigned pos_index; /*! \brief negative index: this is a position in the list */ unsigned neg_index; /*! \brief weight to be filled in */ bst_float weight; // constructor LambdaPair(unsigned pos_index, unsigned neg_index) : pos_index(pos_index), neg_index(neg_index), weight(1.0f) {} // constructor LambdaPair(unsigned pos_index, unsigned neg_index, bst_float weight) : pos_index(pos_index), neg_index(neg_index), weight(weight) {} }; class PairwiseLambdaWeightComputer { public: /*! 
* \brief get lambda weight for existing pairs - for pairwise objective * \param list a list that is sorted by pred score * \param io_pairs record of pairs, containing the pairs to fill in weights */ static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) {} static char const* Name() { return "rank:pairwise"; } #if defined(__CUDACC__) PairwiseLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const SegmentSorter<float> &segment_label_sorter) {} class PairwiseLambdaWeightMultiplier { public: // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { return 1.0f; } }; inline const PairwiseLambdaWeightMultiplier GetWeightMultiplier() const { return {}; } #endif }; #if defined(__CUDACC__) class BaseLambdaWeightMultiplier { public: BaseLambdaWeightMultiplier(const SegmentSorter<float> &segment_label_sorter, const SegmentSorter<float> &segment_pred_sorter) : dsorted_labels_(segment_label_sorter.GetItemsPtr()), dorig_pos_(segment_label_sorter.GetOriginalPositionsPtr()), dgroups_(segment_label_sorter.GetGroupsPtr()), dindexable_sorted_preds_pos_ptr_( segment_pred_sorter.GetIndexableSortedPositions().data().get()) {} protected: const float *dsorted_labels_{nullptr}; // Labels sorted within a group const uint32_t *dorig_pos_{nullptr}; // Original indices of the labels before they are sorted const uint32_t *dgroups_{nullptr}; // The group indices // Where can a prediction for a label be found in the original array, when they are sorted const uint32_t *dindexable_sorted_preds_pos_ptr_{nullptr}; }; // While computing the weight that needs to be adjusted by this ranking objective, we need // to figure out where positive and negative labels chosen earlier exists, if the group // were to be sorted by its predictions. To accommodate this, we employ the following algorithm. 
// For a given group, let's assume the following: // labels: 1 5 9 2 4 8 0 7 6 3 // predictions: 1 9 0 8 2 7 3 6 5 4 // position: 0 1 2 3 4 5 6 7 8 9 // // After label sort: // labels: 9 8 7 6 5 4 3 2 1 0 // position: 2 5 7 8 1 4 9 3 0 6 // // After prediction sort: // predictions: 9 8 7 6 5 4 3 2 1 0 // position: 1 3 5 7 8 9 6 4 0 2 // // If a sorted label at position 'x' is chosen, then we need to find out where the prediction // for this label 'x' exists, if the group were to be sorted by predictions. // We first take the sorted prediction positions: // position: 1 3 5 7 8 9 6 4 0 2 // at indices: 0 1 2 3 4 5 6 7 8 9 // // We create a sorted prediction positional array, such that value at position 'x' gives // us the position in the sorted prediction array where its related prediction lies. // dindexable_sorted_preds_pos_ptr_: 8 0 9 1 7 2 6 3 4 5 // at indices: 0 1 2 3 4 5 6 7 8 9 // Basically, swap the previous 2 arrays, sort the indices and reorder positions // for an O(1) lookup using the position where the sorted label exists. 
// // This type does that using the SegmentSorter class IndexablePredictionSorter { public: IndexablePredictionSorter(const bst_float *dpreds, const SegmentSorter<float> &segment_label_sorter) { // Sort the predictions first segment_pred_sorter_.SortItems(dpreds, segment_label_sorter.GetNumItems(), segment_label_sorter.GetGroupSegments()); // Create an index for the sorted prediction positions segment_pred_sorter_.CreateIndexableSortedPositions(); } inline const SegmentSorter<float> &GetPredictionSorter() const { return segment_pred_sorter_; } private: SegmentSorter<float> segment_pred_sorter_; // For sorting the predictions }; #endif // beta version: NDCG lambda rank class NDCGLambdaWeightComputer #if defined(__CUDACC__) : public IndexablePredictionSorter #endif { public: #if defined(__CUDACC__) // This function object computes the item's DCG value class ComputeItemDCG : public thrust::unary_function<uint32_t, float> { public: XGBOOST_DEVICE ComputeItemDCG(const float *dsorted_labels, const uint32_t *dgroups, const uint32_t *gidxs) : dsorted_labels_(dsorted_labels), dgroups_(dgroups), dgidxs_(gidxs) {} // Compute DCG for the item at 'idx' __device__ __forceinline__ float operator()(uint32_t idx) const { return ComputeItemDCGWeight(dsorted_labels_[idx], idx - dgroups_[dgidxs_[idx]]); } private: const float *dsorted_labels_{nullptr}; // Labels sorted within a group const uint32_t *dgroups_{nullptr}; // The group indices - where each group begins and ends const uint32_t *dgidxs_{nullptr}; // The group each items belongs to }; // Type containing device pointers that can be cheaply copied on the kernel class NDCGLambdaWeightMultiplier : public BaseLambdaWeightMultiplier { public: NDCGLambdaWeightMultiplier(const SegmentSorter<float> &segment_label_sorter, const NDCGLambdaWeightComputer &lwc) : BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()), dgroup_dcg_ptr_(lwc.GetGroupDcgs().data().get()) {} // Adjust the items weight by this value 
__device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { if (dgroup_dcg_ptr_[gidx] == 0.0) return 0.0f; uint32_t group_begin = dgroups_[gidx]; auto pos_lab_orig_posn = dorig_pos_[pidx]; auto neg_lab_orig_posn = dorig_pos_[nidx]; KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn); // Note: the label positive and negative indices are relative to the entire dataset. // Hence, scale them back to an index within the group auto pos_pred_pos = dindexable_sorted_preds_pos_ptr_[pos_lab_orig_posn] - group_begin; auto neg_pred_pos = dindexable_sorted_preds_pos_ptr_[neg_lab_orig_posn] - group_begin; return NDCGLambdaWeightComputer::ComputeDeltaWeight( pos_pred_pos, neg_pred_pos, static_cast<int>(dsorted_labels_[pidx]), static_cast<int>(dsorted_labels_[nidx]), dgroup_dcg_ptr_[gidx]); } private: const float *dgroup_dcg_ptr_{nullptr}; // Start address of the group DCG values }; NDCGLambdaWeightComputer(const bst_float *dpreds, const bst_float *dlabels, const SegmentSorter<float> &segment_label_sorter) : IndexablePredictionSorter(dpreds, segment_label_sorter), dgroup_dcg_(segment_label_sorter.GetNumGroups(), 0.0f), weight_multiplier_(segment_label_sorter, *this) { const auto &group_segments = segment_label_sorter.GetGroupSegments(); // Compute each elements DCG values and reduce them across groups concurrently. 
auto end_range = thrust::reduce_by_key(group_segments.begin(), group_segments.end(), thrust::make_transform_iterator( // The indices need not be sequential within a group, as we care only // about the sum of items DCG values within a group segment_label_sorter.GetOriginalPositions().begin(), ComputeItemDCG(segment_label_sorter.GetItemsPtr(), segment_label_sorter.GetGroupsPtr(), group_segments.data().get())), thrust::make_discard_iterator(), // We don't care for the group indices dgroup_dcg_.begin()); // Sum of the item's DCG values in the group CHECK(end_range.second - dgroup_dcg_.begin() == dgroup_dcg_.size()); } inline const dh::caching_device_vector<float> &GetGroupDcgs() const { return dgroup_dcg_; } inline const NDCGLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; } #endif static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) { std::vector<LambdaPair> &pairs = *io_pairs; float IDCG; // NOLINT { std::vector<bst_float> labels(sorted_list.size()); for (size_t i = 0; i < sorted_list.size(); ++i) { labels[i] = sorted_list[i].label; } std::stable_sort(labels.begin(), labels.end(), std::greater<bst_float>()); IDCG = ComputeGroupDCGWeight(&labels[0], labels.size()); } if (IDCG == 0.0) { for (auto & pair : pairs) { pair.weight = 0.0f; } } else { for (auto & pair : pairs) { unsigned pos_idx = pair.pos_index; unsigned neg_idx = pair.neg_index; pair.weight *= ComputeDeltaWeight(pos_idx, neg_idx, sorted_list[pos_idx].label, sorted_list[neg_idx].label, IDCG); } } } static char const* Name() { return "rank:ndcg"; } inline static bst_float ComputeGroupDCGWeight(const float *sorted_labels, uint32_t size) { double sumdcg = 0.0; for (uint32_t i = 0; i < size; ++i) { sumdcg += ComputeItemDCGWeight(sorted_labels[i], i); } return static_cast<bst_float>(sumdcg); } private: XGBOOST_DEVICE inline static bst_float ComputeItemDCGWeight(unsigned label, uint32_t idx) { return (label != 0) ? 
(((1 << label) - 1) / std::log2(static_cast<bst_float>(idx + 2))) : 0; } // Compute the weight adjustment for an item within a group: // pos_pred_pos => Where does the positive label live, had the list been sorted by prediction // neg_pred_pos => Where does the negative label live, had the list been sorted by prediction // pos_label => positive label value from sorted label list // neg_label => negative label value from sorted label list XGBOOST_DEVICE inline static bst_float ComputeDeltaWeight(uint32_t pos_pred_pos, uint32_t neg_pred_pos, int pos_label, int neg_label, float idcg) { float pos_loginv = 1.0f / std::log2(pos_pred_pos + 2.0f); float neg_loginv = 1.0f / std::log2(neg_pred_pos + 2.0f); bst_float original = ((1 << pos_label) - 1) * pos_loginv + ((1 << neg_label) - 1) * neg_loginv; float changed = ((1 << neg_label) - 1) * pos_loginv + ((1 << pos_label) - 1) * neg_loginv; bst_float delta = (original - changed) * (1.0f / idcg); if (delta < 0.0f) delta = - delta; return delta; } #if defined(__CUDACC__) dh::caching_device_vector<float> dgroup_dcg_; // This computes the adjustment to the weight const NDCGLambdaWeightMultiplier weight_multiplier_; #endif }; class MAPLambdaWeightComputer #if defined(__CUDACC__) : public IndexablePredictionSorter #endif { public: struct MAPStats { /*! \brief the accumulated precision */ float ap_acc{0.0f}; /*! * \brief the accumulated precision, * assuming a positive instance is missing */ float ap_acc_miss{0.0f}; /*! 
* \brief the accumulated precision, * assuming that one more positive instance is inserted ahead */ float ap_acc_add{0.0f}; /* \brief the accumulated positive instance count */ float hits{0.0f}; XGBOOST_DEVICE MAPStats() {} // NOLINT XGBOOST_DEVICE MAPStats(float ap_acc, float ap_acc_miss, float ap_acc_add, float hits) : ap_acc(ap_acc), ap_acc_miss(ap_acc_miss), ap_acc_add(ap_acc_add), hits(hits) {} // For prefix scan XGBOOST_DEVICE MAPStats operator +(const MAPStats &v1) const { return {ap_acc + v1.ap_acc, ap_acc_miss + v1.ap_acc_miss, ap_acc_add + v1.ap_acc_add, hits + v1.hits}; } // For test purposes - compare for equality XGBOOST_DEVICE bool operator ==(const MAPStats &rhs) const { return ap_acc == rhs.ap_acc && ap_acc_miss == rhs.ap_acc_miss && ap_acc_add == rhs.ap_acc_add && hits == rhs.hits; } }; private: template <typename T> XGBOOST_DEVICE inline static void Swap(T &v0, T &v1) { #if defined(__CUDACC__) thrust::swap(v0, v1); #else std::swap(v0, v1); #endif } /*! * \brief Obtain the delta MAP by trying to switch the positions of labels in pos_pred_pos or * neg_pred_pos when sorted by predictions * \param pos_pred_pos positive label's prediction value position when the groups prediction * values are sorted * \param neg_pred_pos negative label's prediction value position when the groups prediction * values are sorted * \param pos_label, neg_label the chosen positive and negative labels * \param p_map_stats a vector containing the accumulated precisions for each position in a list * \param map_stats_size size of the accumulated precisions vector */ XGBOOST_DEVICE inline static bst_float GetLambdaMAP( int pos_pred_pos, int neg_pred_pos, bst_float pos_label, bst_float neg_label, const MAPStats *p_map_stats, uint32_t map_stats_size) { if (pos_pred_pos == neg_pred_pos || p_map_stats[map_stats_size - 1].hits == 0) { return 0.0f; } if (pos_pred_pos > neg_pred_pos) { Swap(pos_pred_pos, neg_pred_pos); Swap(pos_label, neg_label); } bst_float original = 
p_map_stats[neg_pred_pos].ap_acc; if (pos_pred_pos != 0) original -= p_map_stats[pos_pred_pos - 1].ap_acc; bst_float changed = 0; bst_float label1 = pos_label > 0.0f ? 1.0f : 0.0f; bst_float label2 = neg_label > 0.0f ? 1.0f : 0.0f; if (label1 == label2) { return 0.0; } else if (label1 < label2) { changed += p_map_stats[neg_pred_pos - 1].ap_acc_add - p_map_stats[pos_pred_pos].ap_acc_add; changed += (p_map_stats[pos_pred_pos].hits + 1.0f) / (pos_pred_pos + 1); } else { changed += p_map_stats[neg_pred_pos - 1].ap_acc_miss - p_map_stats[pos_pred_pos].ap_acc_miss; changed += p_map_stats[neg_pred_pos].hits / (neg_pred_pos + 1); } bst_float ans = (changed - original) / (p_map_stats[map_stats_size - 1].hits); if (ans < 0) ans = -ans; return ans; } public: /* * \brief obtain preprocessing results for calculating delta MAP * \param sorted_list the list containing entry information * \param map_stats a vector containing the accumulated precisions for each position in a list */ inline static void GetMAPStats(const std::vector<ListEntry> &sorted_list, std::vector<MAPStats> *p_map_acc) { std::vector<MAPStats> &map_acc = *p_map_acc; map_acc.resize(sorted_list.size()); bst_float hit = 0, acc1 = 0, acc2 = 0, acc3 = 0; for (size_t i = 1; i <= sorted_list.size(); ++i) { if (sorted_list[i - 1].label > 0.0f) { hit++; acc1 += hit / i; acc2 += (hit - 1) / i; acc3 += (hit + 1) / i; } map_acc[i - 1] = MAPStats(acc1, acc2, acc3, hit); } } static char const* Name() { return "rank:map"; } static void GetLambdaWeight(const std::vector<ListEntry> &sorted_list, std::vector<LambdaPair> *io_pairs) { std::vector<LambdaPair> &pairs = *io_pairs; std::vector<MAPStats> map_stats; GetMAPStats(sorted_list, &map_stats); for (auto & pair : pairs) { pair.weight *= GetLambdaMAP(pair.pos_index, pair.neg_index, sorted_list[pair.pos_index].label, sorted_list[pair.neg_index].label, &map_stats[0], map_stats.size()); } } #if defined(__CUDACC__) MAPLambdaWeightComputer(const bst_float *dpreds, const bst_float 
*dlabels, const SegmentSorter<float> &segment_label_sorter) : IndexablePredictionSorter(dpreds, segment_label_sorter), dmap_stats_(segment_label_sorter.GetNumItems(), MAPStats()), weight_multiplier_(segment_label_sorter, *this) { this->CreateMAPStats(dlabels, segment_label_sorter); } void CreateMAPStats(const bst_float *dlabels, const SegmentSorter<float> &segment_label_sorter) { // For each group, go through the sorted prediction positions, and look up its corresponding // label from the unsorted labels (from the original label list) // For each item in the group, compute its MAP stats. // Interleave the computation of map stats amongst different groups. // First, determine postive labels in the dataset individually auto nitems = segment_label_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> dhits(nitems, 0); // Original positions of the predictions after they have been sorted const uint32_t *pred_original_pos = this->GetPredictionSorter().GetOriginalPositionsPtr(); // Unsorted labels const float *unsorted_labels = dlabels; auto DeterminePositiveLabelLambda = [=] __device__(uint32_t idx) { return (unsorted_labels[pred_original_pos[idx]] > 0.0f) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), dhits.begin(), DeterminePositiveLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the positive labels that are segmented to accumulate them. // This is required for computing the accumulated precisions const auto &group_segments = segment_label_sorter.GetGroupSegments(); // Data segmented into different groups... 
thrust::inclusive_scan_by_key(thrust::cuda::par(alloc), group_segments.begin(), group_segments.end(), dhits.begin(), // Input value dhits.begin()); // In-place scan // Compute accumulated precisions for each item, assuming positive and // negative instances are missing. // But first, compute individual item precisions const auto *dgidx_arr = group_segments.data().get(); const auto *dhits_arr = dhits.data().get(); // Group info on device const uint32_t *dgroups = segment_label_sorter.GetGroupsPtr(); uint32_t ngroups = segment_label_sorter.GetNumGroups(); auto ComputeItemPrecisionLambda = [=] __device__(uint32_t idx) { if (unsorted_labels[pred_original_pos[idx]] > 0.0f) { auto idx_within_group = (idx - dgroups[dgidx_arr[idx]]) + 1; return MAPStats(static_cast<float>(dhits_arr[idx]) / idx_within_group, static_cast<float>(dhits_arr[idx] - 1) / idx_within_group, static_cast<float>(dhits_arr[idx] + 1) / idx_within_group, 1.0f); } return MAPStats(); }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), this->dmap_stats_.begin(), ComputeItemPrecisionLambda); // Lastly, compute the accumulated precisions for all the items segmented by groups. 
// The precisions are accumulated within each group thrust::inclusive_scan_by_key(thrust::cuda::par(alloc), group_segments.begin(), group_segments.end(), this->dmap_stats_.begin(), // Input map stats this->dmap_stats_.begin()); // In-place scan and output here } inline const dh::caching_device_vector<MAPStats> &GetMapStats() const { return dmap_stats_; } // Type containing device pointers that can be cheaply copied on the kernel class MAPLambdaWeightMultiplier : public BaseLambdaWeightMultiplier { public: MAPLambdaWeightMultiplier(const SegmentSorter<float> &segment_label_sorter, const MAPLambdaWeightComputer &lwc) : BaseLambdaWeightMultiplier(segment_label_sorter, lwc.GetPredictionSorter()), dmap_stats_ptr_(lwc.GetMapStats().data().get()) {} // Adjust the items weight by this value __device__ __forceinline__ bst_float GetWeight(uint32_t gidx, int pidx, int nidx) const { uint32_t group_begin = dgroups_[gidx]; uint32_t group_end = dgroups_[gidx + 1]; auto pos_lab_orig_posn = dorig_pos_[pidx]; auto neg_lab_orig_posn = dorig_pos_[nidx]; KERNEL_CHECK(pos_lab_orig_posn != neg_lab_orig_posn); // Note: the label positive and negative indices are relative to the entire dataset. 
// Hence, scale them back to an index within the group auto pos_pred_pos = dindexable_sorted_preds_pos_ptr_[pos_lab_orig_posn] - group_begin; auto neg_pred_pos = dindexable_sorted_preds_pos_ptr_[neg_lab_orig_posn] - group_begin; return MAPLambdaWeightComputer::GetLambdaMAP( pos_pred_pos, neg_pred_pos, dsorted_labels_[pidx], dsorted_labels_[nidx], &dmap_stats_ptr_[group_begin], group_end - group_begin); } private: const MAPStats *dmap_stats_ptr_{nullptr}; // Start address of the map stats for every sorted // prediction value }; inline const MAPLambdaWeightMultiplier GetWeightMultiplier() const { return weight_multiplier_; } private: dh::caching_device_vector<MAPStats> dmap_stats_; // This computes the adjustment to the weight const MAPLambdaWeightMultiplier weight_multiplier_; #endif }; #if defined(__CUDACC__) class SortedLabelList : SegmentSorter<float> { private: const LambdaRankParam &param_; // Objective configuration public: explicit SortedLabelList(const LambdaRankParam &param) : param_(param) {} // Sort the labels that are grouped by 'groups' void Sort(const HostDeviceVector<bst_float> &dlabels, const std::vector<uint32_t> &groups) { this->SortItems(dlabels.ConstDevicePointer(), dlabels.Size(), groups); } // This kernel can only run *after* the kernel in sort is completed, as they // use the default stream template <typename LambdaWeightComputerT> void ComputeGradients(const bst_float *dpreds, // Unsorted predictions const bst_float *dlabels, // Unsorted labels const HostDeviceVector<bst_float> &weights, int iter, GradientPair *out_gpair, float weight_normalization_factor) { // Group info on device const uint32_t *dgroups = this->GetGroupsPtr(); uint32_t ngroups = this->GetNumGroups() + 1; uint32_t total_items = this->GetNumItems(); uint32_t niter = param_.num_pairsample * total_items; float fix_list_weight = param_.fix_list_weight; const uint32_t *original_pos = this->GetOriginalPositionsPtr(); uint32_t num_weights = weights.Size(); auto dweights = 
num_weights ? weights.ConstDevicePointer() : nullptr; const bst_float *sorted_labels = this->GetItemsPtr(); // This is used to adjust the weight of different elements based on the different ranking // objective function policies LambdaWeightComputerT weight_computer(dpreds, dlabels, *this); auto wmultiplier = weight_computer.GetWeightMultiplier(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each instance in the group, compute the gradient pair concurrently dh::LaunchN(device_id, niter, nullptr, [=] __device__(uint32_t idx) { // First, determine the group 'idx' belongs to uint32_t item_idx = idx % total_items; uint32_t group_idx = dh::UpperBound(dgroups, ngroups, item_idx); // Span of this group within the larger labels/predictions sorted tuple uint32_t group_begin = dgroups[group_idx - 1]; uint32_t group_end = dgroups[group_idx]; uint32_t total_group_items = group_end - group_begin; // Are the labels diverse enough? If they are all the same, then there is nothing to pick // from another group - bail sooner if (sorted_labels[group_begin] == sorted_labels[group_end - 1]) return; // Find the number of labels less than and greater than the current label // at the sorted index position item_idx uint32_t nleft = CountNumItemsToTheLeftOf( sorted_labels + group_begin, item_idx - group_begin + 1, sorted_labels[item_idx]); uint32_t nright = CountNumItemsToTheRightOf( sorted_labels + item_idx, group_end - item_idx, sorted_labels[item_idx]); // Create a minstd_rand object to act as our source of randomness thrust::minstd_rand rng((iter + 1) * 1111); rng.discard(((idx / total_items) * total_group_items) + item_idx - group_begin); // Create a uniform_int_distribution to produce a sample from outside of the // present label group thrust::uniform_int_distribution<int> dist(0, nleft + nright - 1); int sample = dist(rng); int pos_idx = -1; // Bigger label int neg_idx = -1; // Smaller label // Are we picking a sample to the left/right of the current group? 
if (sample < nleft) { // Go left pos_idx = sample + group_begin; neg_idx = item_idx; } else { pos_idx = item_idx; uint32_t items_in_group = total_group_items - nleft - nright; neg_idx = sample + items_in_group + group_begin; } // Compute and assign the gradients now const float eps = 1e-16f; bst_float p = common::Sigmoid(dpreds[original_pos[pos_idx]] - dpreds[original_pos[neg_idx]]); bst_float g = p - 1.0f; bst_float h = thrust::max(p * (1.0f - p), eps); // Rescale each gradient and hessian so that the group has a weighted constant float scale = __frcp_ru(niter / total_items); if (fix_list_weight != 0.0f) { scale *= fix_list_weight / total_group_items; } float weight = num_weights ? dweights[group_idx - 1] : 1.0f; weight *= weight_normalization_factor; weight *= wmultiplier.GetWeight(group_idx - 1, pos_idx, neg_idx); weight *= scale; // Accumulate gradient and hessian in both positive and negative indices const GradientPair in_pos_gpair(g * weight, 2.0f * weight * h); dh::AtomicAddGpair(&out_gpair[original_pos[pos_idx]], in_pos_gpair); const GradientPair in_neg_gpair(-g * weight, 2.0f * weight * h); dh::AtomicAddGpair(&out_gpair[original_pos[neg_idx]], in_neg_gpair); }); // Wait until the computations done by the kernel is complete dh::safe_cuda(cudaStreamSynchronize(nullptr)); } }; #endif // objective for lambda rank template <typename LambdaWeightComputerT> class LambdaRankObj : public ObjFunction { public: void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_EQ(preds.Size(), info.labels_.Size()) << "label size predict size not match"; // quick consistency when group is not available std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? 
tgptr : info.group_ptr_; CHECK(gptr.size() != 0 && gptr.back() == info.labels_.Size()) << "group structure not consistent with #rows" << ", " << "group ponter size: " << gptr.size() << ", " << "labels size: " << info.labels_.Size() << ", " << "group pointer back: " << (gptr.size() == 0 ? 0 : gptr.back()); #if defined(__CUDACC__) // Check if we have a GPU assignment; else, revert back to CPU auto device = tparam_->gpu_id; if (device >= 0) { ComputeGradientsOnGPU(preds, info, iter, out_gpair, gptr); } else { // Revert back to CPU #endif ComputeGradientsOnCPU(preds, info, iter, out_gpair, gptr); #if defined(__CUDACC__) } #endif } const char* DefaultEvalMetric() const override { return "map"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(LambdaWeightComputerT::Name()); out["lambda_rank_param"] = Object(); for (auto const& kv : param_.__DICT__()) { out["lambda_rank_param"][kv.first] = kv.second; } } void LoadConfig(Json const& in) override { fromJson(in["lambda_rank_param"], &param_); } private: bst_float ComputeWeightNormalizationFactor(const MetaInfo& info, const std::vector<unsigned> &gptr) { const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); bst_float sum_weights = 0; for (bst_omp_uint k = 0; k < ngroup; ++k) { sum_weights += info.GetWeight(k); } return ngroup / sum_weights; } void ComputeGradientsOnCPU(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair, const std::vector<unsigned> &gptr) { LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on CPU."; bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr); const auto& preds_h = preds.HostVector(); const auto& labels = info.labels_.HostVector(); std::vector<GradientPair>& gpair = out_gpair->HostVector(); const auto ngroup = static_cast<bst_omp_uint>(gptr.size() - 1); out_gpair->Resize(preds.Size()); #pragma omp parallel { // parallel construct, 
declare random number generator here, so that each // thread use its own random number generator, seed by thread id and current iteration std::minstd_rand rnd((iter + 1) * 1111); std::vector<LambdaPair> pairs; std::vector<ListEntry> lst; std::vector< std::pair<bst_float, unsigned> > rec; #pragma omp for schedule(static) for (bst_omp_uint k = 0; k < ngroup; ++k) { lst.clear(); pairs.clear(); for (unsigned j = gptr[k]; j < gptr[k+1]; ++j) { lst.emplace_back(preds_h[j], labels[j], j); gpair[j] = GradientPair(0.0f, 0.0f); } std::stable_sort(lst.begin(), lst.end(), ListEntry::CmpPred); rec.resize(lst.size()); for (unsigned i = 0; i < lst.size(); ++i) { rec[i] = std::make_pair(lst[i].label, i); } std::stable_sort(rec.begin(), rec.end(), common::CmpFirst); // enumerate buckets with same label, for each item in the lst, grab another sample randomly for (unsigned i = 0; i < rec.size(); ) { unsigned j = i + 1; while (j < rec.size() && rec[j].first == rec[i].first) ++j; // bucket in [i,j), get a sample outside bucket unsigned nleft = i, nright = static_cast<unsigned>(rec.size() - j); if (nleft + nright != 0) { int nsample = param_.num_pairsample; while (nsample --) { for (unsigned pid = i; pid < j; ++pid) { unsigned ridx = std::uniform_int_distribution<unsigned>(0, nleft + nright - 1)(rnd); if (ridx < nleft) { pairs.emplace_back(rec[ridx].second, rec[pid].second, info.GetWeight(k) * weight_normalization_factor); } else { pairs.emplace_back(rec[pid].second, rec[ridx+j-i].second, info.GetWeight(k) * weight_normalization_factor); } } } } i = j; } // get lambda weight for the pairs LambdaWeightComputerT::GetLambdaWeight(lst, &pairs); // rescale each gradient and hessian so that the lst have constant weighted float scale = 1.0f / param_.num_pairsample; if (param_.fix_list_weight != 0.0f) { scale *= param_.fix_list_weight / (gptr[k + 1] - gptr[k]); } for (auto & pair : pairs) { const ListEntry &pos = lst[pair.pos_index]; const ListEntry &neg = lst[pair.neg_index]; const bst_float w 
= pair.weight * scale; const float eps = 1e-16f; bst_float p = common::Sigmoid(pos.pred - neg.pred); bst_float g = p - 1.0f; bst_float h = std::max(p * (1.0f - p), eps); // accumulate gradient and hessian in both pid, and nid gpair[pos.rindex] += GradientPair(g * w, 2.0f*w*h); gpair[neg.rindex] += GradientPair(-g * w, 2.0f*w*h); } } } } #if defined(__CUDACC__) void ComputeGradientsOnGPU(const HostDeviceVector<bst_float>& preds, const MetaInfo& info, int iter, HostDeviceVector<GradientPair>* out_gpair, const std::vector<unsigned> &gptr) { LOG(DEBUG) << "Computing " << LambdaWeightComputerT::Name() << " gradients on GPU."; auto device = tparam_->gpu_id; dh::safe_cuda(cudaSetDevice(device)); bst_float weight_normalization_factor = ComputeWeightNormalizationFactor(info, gptr); // Set the device ID and copy them to the device out_gpair->SetDevice(device); info.labels_.SetDevice(device); preds.SetDevice(device); info.weights_.SetDevice(device); out_gpair->Resize(preds.Size()); auto d_preds = preds.ConstDevicePointer(); auto d_gpair = out_gpair->DevicePointer(); auto d_labels = info.labels_.ConstDevicePointer(); SortedLabelList slist(param_); // Sort the labels within the groups on the device slist.Sort(info.labels_, gptr); // Initialize the gradients next out_gpair->Fill(GradientPair(0.0f, 0.0f)); // Finally, compute the gradients slist.ComputeGradients<LambdaWeightComputerT> (d_preds, d_labels, info.weights_, iter, d_gpair, weight_normalization_factor); } #endif LambdaRankParam param_; }; #if !defined(GTEST_TEST) // register the objective functions DMLC_REGISTER_PARAMETER(LambdaRankParam); XGBOOST_REGISTER_OBJECTIVE(PairwiseRankObj, PairwiseLambdaWeightComputer::Name()) .describe("Pairwise rank objective.") .set_body([]() { return new LambdaRankObj<PairwiseLambdaWeightComputer>(); }); XGBOOST_REGISTER_OBJECTIVE(LambdaRankNDCG, NDCGLambdaWeightComputer::Name()) .describe("LambdaRank with NDCG as objective.") .set_body([]() { return new 
LambdaRankObj<NDCGLambdaWeightComputer>(); }); XGBOOST_REGISTER_OBJECTIVE(LambdaRankObjMAP, MAPLambdaWeightComputer::Name()) .describe("LambdaRank with MAP as objective.") .set_body([]() { return new LambdaRankObj<MAPLambdaWeightComputer>(); }); #endif } // namespace obj } // namespace xgboost
da9a1544a7bd9f6cfe27b00c2e8b99baeb623115.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <algorithm> #include <util.cuh> #include <worker.cuh> #include <iostream> #include <layer.cuh> #include <nvmatrix.cuh> #include <noiselayer.cuh> using namespace std; /* * ==================== * WorkResult * ==================== */ WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) { } WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) { } WorkResult::~WorkResult() { delete _results; // delete NULL is ok } Cost& WorkResult::getResults() const { return *_results; } WorkResult::RESULTS WorkResult::getResultType() const { return _resultType; } /* * ==================== * Worker * ==================== */ Worker::Worker(ConvNet& convNet) : _convNet(&convNet) { } /* * ==================== * DataWorker * ==================== */ DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) { _dp = &convNet.getDataProvider(); } DataWorker::~DataWorker() { _dp->clearData(); } /* * ==================== * TrainingWorker * ==================== */ TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test) : DataWorker(convNet, data), _test(test) { } // Need to setData here (as opposed to the constructor) because the constructor executes in // the original CPU thread, which is not the one with GPU access. void TrainingWorker::run() { _dp->setData(*_data); Cost& batchCost = *new Cost(0); Layer& l = _convNet->getLayer(_convNet->getNumLayers() - 2); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, _test ? 
PASS_TEST : PASS_TRAIN); _convNet->getCost(batchCost); if (!_test) { _convNet->bprop(PASS_TRAIN); _convNet->updateWeights(); if (l.getName() == "noise") { // this weight is representing probability, so it should be normalized if (((WeightLayer&)l).getWeights(0).getEps() > 0) { NVMatrix& m = ((WeightLayer&)l).getWeights(0).getW(); assert(m.getNumCols() == m.getNumRows()); prob_project(m); } } } } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * SyncWorker * ==================== */ SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) { } void SyncWorker::run() { _convNet->copyToCPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } /* * ==================== * GradCheckWorker * ==================== */ GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data) : DataWorker(convNet, data) { } void GradCheckWorker::run() { _dp->setData(*_data); _convNet->checkGradients(); exit(0); } /* * ==================== * MultiviewTestWorker * ==================== */ MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx) : DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) { assert(_data->getNumCases() % _numViews == 0); } void MultiviewTestWorker::run() { _dp->setData(*_data); Layer& logregLayer = _convNet->getLayer(_logregIdx); int numCasesReal = _dp->getNumCases() / _numViews; int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize()); Cost& batchCost = *new Cost(0); for (int i = 0; i < numMiniReal; i++) { NVMatrix softmaxActs; for (int v = 0; v < _numViews; v++) { GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(), min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize())); _convNet->fprop(mini, PASS_TEST); if (v == 0) { logregLayer.getPrev()[1]->getActs().copy(softmaxActs); } else { 
softmaxActs.add(logregLayer.getPrev()[1]->getActs()); } } softmaxActs.scale(1.0 / _numViews); NVMatrixV logregInput; logregInput.push_back(&logregLayer.getPrev()[0]->getActs()); logregInput.push_back(&softmaxActs); logregLayer.fprop(logregInput, PASS_TEST); _convNet->getCost(batchCost); } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * FeatureWorker * ==================== */ FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx) : DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) { assert(ftrs.getNumRows() == data.getNumCases()); assert(!ftrs.isTrans()); } FeatureWorker::~FeatureWorker() { delete _ftrs; } void FeatureWorker::run() { _dp->setData(*_data); Layer& ftrLayer = _convNet->getLayer(_layerIdx); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, PASS_TEST); _convNet->getCost(batchCost); Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(), min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize())); NVMatrix& acts = ftrLayer.getActs(); NVMatrix acts_T; if (acts.isTrans()) { NVMatrix& soft_T = acts.getTranspose(); soft_T.transpose(acts_T); delete &soft_T; } else { acts.transpose(acts_T); } acts_T.copyToHost(miniFtrs); delete &miniFtrs; } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); }
da9a1544a7bd9f6cfe27b00c2e8b99baeb623115.cu
/* * Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <algorithm> #include <util.cuh> #include <worker.cuh> #include <iostream> #include <layer.cuh> #include <nvmatrix.cuh> #include <noiselayer.cuh> using namespace std; /* * ==================== * WorkResult * ==================== */ WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) { } WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) { } WorkResult::~WorkResult() { delete _results; // delete NULL is ok } Cost& WorkResult::getResults() const { return *_results; } WorkResult::RESULTS WorkResult::getResultType() const { return _resultType; } /* * ==================== * Worker * ==================== */ Worker::Worker(ConvNet& convNet) : _convNet(&convNet) { } /* * ==================== * DataWorker * ==================== */ DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) { _dp = &convNet.getDataProvider(); } DataWorker::~DataWorker() { _dp->clearData(); } /* * ==================== * TrainingWorker * ==================== */ TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test) : DataWorker(convNet, data), _test(test) { } // Need to setData here (as opposed to the constructor) because the constructor executes in // the original CPU thread, which is not the one with GPU access. void TrainingWorker::run() { _dp->setData(*_data); Cost& batchCost = *new Cost(0); Layer& l = _convNet->getLayer(_convNet->getNumLayers() - 2); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, _test ? 
PASS_TEST : PASS_TRAIN); _convNet->getCost(batchCost); if (!_test) { _convNet->bprop(PASS_TRAIN); _convNet->updateWeights(); if (l.getName() == "noise") { // this weight is representing probability, so it should be normalized if (((WeightLayer&)l).getWeights(0).getEps() > 0) { NVMatrix& m = ((WeightLayer&)l).getWeights(0).getW(); assert(m.getNumCols() == m.getNumRows()); prob_project(m); } } } } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * SyncWorker * ==================== */ SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) { } void SyncWorker::run() { _convNet->copyToCPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } /* * ==================== * GradCheckWorker * ==================== */ GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data) : DataWorker(convNet, data) { } void GradCheckWorker::run() { _dp->setData(*_data); _convNet->checkGradients(); exit(0); } /* * ==================== * MultiviewTestWorker * ==================== */ MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx) : DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) { assert(_data->getNumCases() % _numViews == 0); } void MultiviewTestWorker::run() { _dp->setData(*_data); Layer& logregLayer = _convNet->getLayer(_logregIdx); int numCasesReal = _dp->getNumCases() / _numViews; int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize()); Cost& batchCost = *new Cost(0); for (int i = 0; i < numMiniReal; i++) { NVMatrix softmaxActs; for (int v = 0; v < _numViews; v++) { GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(), min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize())); _convNet->fprop(mini, PASS_TEST); if (v == 0) { logregLayer.getPrev()[1]->getActs().copy(softmaxActs); } else { 
softmaxActs.add(logregLayer.getPrev()[1]->getActs()); } } softmaxActs.scale(1.0 / _numViews); NVMatrixV logregInput; logregInput.push_back(&logregLayer.getPrev()[0]->getActs()); logregInput.push_back(&softmaxActs); logregLayer.fprop(logregInput, PASS_TEST); _convNet->getCost(batchCost); } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * FeatureWorker * ==================== */ FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx) : DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) { assert(ftrs.getNumRows() == data.getNumCases()); assert(!ftrs.isTrans()); } FeatureWorker::~FeatureWorker() { delete _ftrs; } void FeatureWorker::run() { _dp->setData(*_data); Layer& ftrLayer = _convNet->getLayer(_layerIdx); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, PASS_TEST); _convNet->getCost(batchCost); Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(), min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize())); NVMatrix& acts = ftrLayer.getActs(); NVMatrix acts_T; if (acts.isTrans()) { NVMatrix& soft_T = acts.getTranspose(); soft_T.transpose(acts_T); delete &soft_T; } else { acts.transpose(acts_T); } acts_T.copyToHost(miniFtrs); delete &miniFtrs; } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); }
b208fac484bd7b2159cd4bbac45dcf4fadd91f2b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <iostream> #include <assert.h> #include "Utilities.cuh" #include <hip/hip_runtime.h> #include <cusparse_v2.h> #define BLOCKSIZE 256 /**************************/ /* SETTING UP THE PROBLEM */ /**************************/ void setUpTheProblem(double **h_A_dense, double **h_x_dense, double **d_A_dense, double **d_x_dense, const int N) { // --- Host side dense matrix h_A_dense[0] = (double*)calloc(N * N, sizeof(*h_A_dense)); // --- Column-major ordering h_A_dense[0][0] = 0.4612f; h_A_dense[0][4] = -0.0006f; h_A_dense[0][8] = 0.f; h_A_dense[0][12] = 0.0f; h_A_dense[0][1] = -0.0006f; h_A_dense[0][5] = 0.f; h_A_dense[0][9] = 0.0723f; h_A_dense[0][13] = 0.04f; h_A_dense[0][2] = 0.3566f; h_A_dense[0][6] = 0.0723f; h_A_dense[0][10] = 0.f; h_A_dense[0][14] = 0.0f; h_A_dense[0][3] = 0.0f; h_A_dense[0][7] = 0.0f; h_A_dense[0][11] = 1.0f; h_A_dense[0][15] = 0.1f; h_x_dense[0] = (double *)malloc(N * sizeof(double)); h_x_dense[0][0] = 100.0; h_x_dense[0][1] = 200.0; h_x_dense[0][2] = 400.0; h_x_dense[0][3] = 500.0; // --- Create device arrays and copy host arrays to them gpuErrchk(hipMalloc(&d_A_dense[0], N * N * sizeof(double))); gpuErrchk(hipMemcpy(d_A_dense[0], h_A_dense[0], N * N * sizeof(double), hipMemcpyHostToDevice)); gpuErrchk(hipMalloc(&d_x_dense[0], N * sizeof(double))); gpuErrchk(hipMemcpy(d_x_dense[0], h_x_dense[0], N * sizeof(double), hipMemcpyHostToDevice)); } /************************/ /* FROM DENSE TO SPARSE */ /************************/ void fromDenseToSparse(const hipsparseHandle_t handle, double *d_A_dense, double **d_A, int **d_A_RowIndices, int **d_A_ColIndices, int *nnz, hipsparseMatDescr_t *descrA, const int N) { cusparseSafeCall(hipsparseCreateMatDescr(&descrA[0])); cusparseSafeCall(hipsparseSetMatType (descrA[0], HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrA[0], HIPSPARSE_INDEX_BASE_ZERO)); nnz[0] = 0; // --- Number 
of nonzero elements in dense matrix const int lda = N; // --- Leading dimension of dense matrix // --- Device side number of nonzero elements per row int *d_nnzPerVector; gpuErrchk(hipMalloc(&d_nnzPerVector, N * sizeof(int))); cusparseSafeCall(hipsparseDnnz(handle, HIPSPARSE_DIRECTION_ROW, N, N, descrA[0], d_A_dense, lda, d_nnzPerVector, &nnz[0])); // --- Host side number of nonzero elements per row int *h_nnzPerVector = (int *)malloc(N * sizeof(int)); gpuErrchk(hipMemcpy(h_nnzPerVector, d_nnzPerVector, N * sizeof(int), hipMemcpyDeviceToHost)); printf("Number of nonzero elements in dense matrix = %i\n\n", nnz[0]); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i = %i \n", i, h_nnzPerVector[i]); printf("\n"); // --- Device side sparse matrix gpuErrchk(hipMalloc(&d_A[0], nnz[0] * sizeof(double))); gpuErrchk(hipMalloc(&d_A_RowIndices[0], (N + 1) * sizeof(int))); gpuErrchk(hipMalloc(&d_A_ColIndices[0], nnz[0] * sizeof(int))); cusparseSafeCall(hipsparseDdense2csr(handle, N, N, descrA[0], d_A_dense, lda, d_nnzPerVector, d_A[0], d_A_RowIndices[0], d_A_ColIndices[0])); // --- Host side sparse matrix double *h_A = (double *)malloc(nnz[0] * sizeof(double)); int *h_A_RowIndices = (int *)malloc((N + 1) * sizeof(*h_A_RowIndices)); int *h_A_ColIndices = (int *)malloc(nnz[0] * sizeof(*h_A_ColIndices)); gpuErrchk(hipMemcpy(h_A, d_A[0], nnz[0] * sizeof(double), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_A_RowIndices, d_A_RowIndices[0], (N + 1) * sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_A_ColIndices, d_A_ColIndices[0], nnz[0] * sizeof(int), hipMemcpyDeviceToHost)); printf("\nOriginal matrix in CSR format\n\n"); for (int i = 0; i < nnz[0]; ++i) printf("A[%i] = %f ", i, h_A[i]); printf("\n"); printf("\n"); for (int i = 0; i < (N + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n"); for (int i = 0; i < nnz[0]; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]); } /******************/ /* GRAPH 
COLORING */ /******************/ __global__ void setRowIndices(int *d_B_RowIndices, const int N) { const int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid == N) d_B_RowIndices[tid] = N; else if (tid < N) d_B_RowIndices[tid] = tid; } __global__ void setB(double *d_B, const int N) { const int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < N) d_B[tid] = 1.f; } void graphColoring(const hipsparseHandle_t handle, const int nnz, const hipsparseMatDescr_t descrA, const double fractionToColor, double *d_A, const int *d_A_RowIndices, const int *d_A_ColIndices, double **d_B, int **d_B_RowIndices, int **d_B_ColIndices, hipsparseMatDescr_t *descrB, const int N) { cusparseColorInfo_t info; cusparseSafeCall(cusparseCreateColorInfo(&info)); int ncolors; int *d_coloring; gpuErrchk(hipMalloc(&d_coloring, N * sizeof(double))); gpuErrchk(hipMalloc(&d_B_ColIndices[0], N * sizeof(double))); cusparseSafeCall(cusparseDcsrcolor(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, &fractionToColor, &ncolors, d_coloring, d_B_ColIndices[0], info)); int *h_coloring = (int *)malloc(N * sizeof(double)); int *h_B_ColIndices = (int *)malloc(N * sizeof(double)); gpuErrchk(hipMemcpy(h_coloring, d_coloring, N * sizeof(double), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_B_ColIndices, d_B_ColIndices[0], N * sizeof(double), hipMemcpyDeviceToHost)); for (int i = 0; i < N; i++) printf("h_coloring = %i; h_B_ColIndices = %i\n", h_coloring[i], h_B_ColIndices[i]); gpuErrchk(hipMalloc(&d_B_RowIndices[0], (N + 1) * sizeof(int))); int *h_B_RowIndices = (int *)malloc((N + 1) * sizeof(double)); hipLaunchKernelGGL(( setRowIndices), dim3(iDivUp(N + 1, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, d_B_RowIndices[0], N); gpuErrchk(hipMemcpy(h_B_RowIndices, d_B_RowIndices[0], (N + 1) * sizeof(int), hipMemcpyDeviceToHost)); printf("\n"); for (int i = 0; i <= N; i++) printf("h_B_RowIndices = %i\n", h_B_RowIndices[i]); gpuErrchk(hipMalloc(&d_B[0], N * sizeof(double))); double *h_B = (double *)malloc(N * 
sizeof(double)); hipLaunchKernelGGL(( setB), dim3(iDivUp(N, BLOCKSIZE)), dim3(BLOCKSIZE), 0, 0, d_B[0], N); gpuErrchk(hipMemcpy(h_B, d_B[0], N * sizeof(double), hipMemcpyDeviceToHost)); printf("\n"); for (int i = 0; i < N; i++) printf("h_B = %f\n", h_B[i]); // --- Descriptor for sparse mutation matrix B cusparseSafeCall(hipsparseCreateMatDescr(&descrB[0])); cusparseSafeCall(hipsparseSetMatType (descrB[0], HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrB[0], HIPSPARSE_INDEX_BASE_ZERO)); } /*************************/ /* MATRIX ROW REORDERING */ /*************************/ void matrixRowReordering(const hipsparseHandle_t handle, int nnzA, int nnzB, int *nnzC, hipsparseMatDescr_t descrA, hipsparseMatDescr_t descrB, hipsparseMatDescr_t *descrC, double *d_A, int *d_A_RowIndices, int *d_A_ColIndices, double *d_B, int *d_B_RowIndices, int *d_B_ColIndices, double **d_C, int **d_C_RowIndices, int **d_C_ColIndices, const int N) { // --- Descriptor for sparse matrix C cusparseSafeCall(hipsparseCreateMatDescr(&descrC[0])); cusparseSafeCall(hipsparseSetMatType (descrC[0], HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatIndexBase(descrC[0], HIPSPARSE_INDEX_BASE_ZERO)); const int lda = N; // --- Leading dimension of dense matrix // --- Device side sparse matrix gpuErrchk(hipMalloc(&d_C_RowIndices[0], (N + 1) * sizeof(int))); // --- Host side sparse matrices int *h_C_RowIndices = (int *)malloc((N + 1) * sizeof(int)); // --- Performing the matrix - matrix multiplication int baseC; int *nnzTotalDevHostPtr = &nnzC[0]; cusparseSafeCall(hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(hipsparseXcsrgemmNnz(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrB, nnzB, d_B_RowIndices, d_B_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC[0], d_C_RowIndices[0], nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC[0] = *nnzTotalDevHostPtr; else { 
gpuErrchk(hipMemcpy(&nnzC[0], d_C_RowIndices + N, sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(&baseC, d_C_RowIndices, sizeof(int), hipMemcpyDeviceToHost)); nnzC -= baseC; } gpuErrchk(hipMalloc(&d_C_ColIndices[0], nnzC[0] * sizeof(int))); gpuErrchk(hipMalloc(&d_C[0], nnzC[0] * sizeof(double))); double *h_C = (double *)malloc(nnzC[0] * sizeof(double)); int *h_C_ColIndices = (int *)malloc(nnzC[0] * sizeof(int)); cusparseSafeCall(hipsparseDcsrgemm(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrB, nnzB, d_B, d_B_RowIndices, d_B_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC[0], d_C[0], d_C_RowIndices[0], d_C_ColIndices[0])); double *h_C_dense = (double*)malloc(N * N * sizeof(double)); double *d_C_dense; gpuErrchk(hipMalloc(&d_C_dense, N * N * sizeof(double))); cusparseSafeCall(hipsparseDcsr2dense(handle, N, N, descrC[0], d_C[0], d_C_RowIndices[0], d_C_ColIndices[0], d_C_dense, N)); gpuErrchk(hipMemcpy(h_C , d_C[0], nnzC[0] * sizeof(double), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_C_RowIndices, d_C_RowIndices[0], (N + 1) * sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(h_C_ColIndices, d_C_ColIndices[0], nnzC[0] * sizeof(int), hipMemcpyDeviceToHost)); printf("\nResult matrix C in CSR format\n\n"); for (int i = 0; i < nnzC[0]; ++i) printf("C[%i] = %f ", i, h_C[i]); printf("\n"); printf("\n"); for (int i = 0; i < (N + 1); ++i) printf("h_C_RowIndices[%i] = %i \n", i, h_C_RowIndices[i]); printf("\n"); printf("\n"); for (int i = 0; i < nnzC[0]; ++i) printf("h_C_ColIndices[%i] = %i \n", i, h_C_ColIndices[i]); gpuErrchk(hipMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), hipMemcpyDeviceToHost)); for (int j = 0; j < N; j++) { for (int i = 0; i < N; i++) printf("%f \t", h_C_dense[i * N + j]); printf("\n"); } } /******************/ /* ROW REORDERING */ /******************/ void rowReordering(const hipsparseHandle_t handle, int nnzA, hipsparseMatDescr_t descrB, double *d_B, int 
*d_B_RowIndices, int *d_B_ColIndices, double *d_x_dense, double **d_y_dense, const int N) { gpuErrchk(hipMalloc(&d_y_dense[0], N * sizeof(double))); const double alpha = 1.; const double beta = 0.; cusparseSafeCall(hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nnzA, &alpha, descrB, d_B, d_B_RowIndices, d_B_ColIndices, d_x_dense, &beta, d_y_dense[0])); double *h_y_dense = (double*)malloc(N * sizeof(double)); gpuErrchk(hipMemcpy(h_y_dense, d_y_dense[0], N * sizeof(double), hipMemcpyDeviceToHost)); printf("\nResult vector\n\n"); for (int i = 0; i < N; ++i) printf("h_y[%i] = %f ", i, h_y_dense[i]); printf("\n"); } /*****************************/ /* SOLVING THE LINEAR SYSTEM */ /*****************************/ void LUDecomposition(const hipsparseHandle_t handle, int nnzC, hipsparseMatDescr_t descrC, double *d_C, int *d_C_RowIndices, int *d_C_ColIndices, double *d_x_dense, double **d_y_dense, const int N) { /******************************************/ /* STEP 1: CREATE DESCRIPTORS FOR L AND U */ /******************************************/ hipsparseMatDescr_t descr_L = 0; cusparseSafeCall(hipsparseCreateMatDescr (&descr_L)); cusparseSafeCall(hipsparseSetMatIndexBase(descr_L, HIPSPARSE_INDEX_BASE_ZERO)); cusparseSafeCall(hipsparseSetMatType (descr_L, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatFillMode (descr_L, HIPSPARSE_FILL_MODE_LOWER)); cusparseSafeCall(hipsparseSetMatDiagType (descr_L, HIPSPARSE_DIAG_TYPE_UNIT)); hipsparseMatDescr_t descr_U = 0; cusparseSafeCall(hipsparseCreateMatDescr (&descr_U)); cusparseSafeCall(hipsparseSetMatIndexBase(descr_U, HIPSPARSE_INDEX_BASE_ZERO)); cusparseSafeCall(hipsparseSetMatType (descr_U, HIPSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(hipsparseSetMatFillMode (descr_U, HIPSPARSE_FILL_MODE_UPPER)); cusparseSafeCall(hipsparseSetMatDiagType (descr_U, HIPSPARSE_DIAG_TYPE_NON_UNIT)); /**************************************************************************************************/ /* STEP 2: 
QUERY HOW MUCH MEMORY USED IN LU FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */ /**************************************************************************************************/ csrilu02Info_t info_C = 0; cusparseSafeCall(hipsparseCreateCsrilu02Info (&info_C)); csrsv2Info_t info_L = 0; cusparseSafeCall(hipsparseCreateCsrsv2Info (&info_L)); csrsv2Info_t info_U = 0; cusparseSafeCall(hipsparseCreateCsrsv2Info (&info_U)); int pBufferSize_M, pBufferSize_L, pBufferSize_U; cusparseSafeCall(hipsparseDcsrilu02_bufferSize(handle, N, nnzC, descrC, d_C, d_C_RowIndices, d_C_ColIndices, info_C, &pBufferSize_M)); cusparseSafeCall(hipsparseDcsrsv2_bufferSize (handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, descr_L, d_C, d_C_RowIndices, d_C_ColIndices, info_L, &pBufferSize_L)); cusparseSafeCall(hipsparseDcsrsv2_bufferSize (handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, descr_U, d_C, d_C_RowIndices, d_C_ColIndices, info_U, &pBufferSize_U)); int pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_U)); void *pBuffer = 0; gpuErrchk(hipMalloc((void**)&pBuffer, pBufferSize)); /************************************************************************************************/ /* STEP 3: ANALYZE THE THREE PROBLEMS: LU FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */ /************************************************************************************************/ int structural_zero; cusparseSafeCall(hipsparseDcsrilu02_analysis(handle, N, nnzC, descrC, d_C, d_C_RowIndices, d_C_ColIndices, info_C, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer)); hipsparseStatus_t status = hipsparseXcsrilu02_zeroPivot(handle, info_C, &structural_zero); if (HIPSPARSE_STATUS_ZERO_PIVOT == status){ printf("A(%d,%d) is missing\n", structural_zero, structural_zero); } cusparseSafeCall(hipsparseDcsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, descr_L, d_C, d_C_RowIndices, d_C_ColIndices, info_L, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer)); 
cusparseSafeCall(hipsparseDcsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, descr_U, d_C, d_C_RowIndices, d_C_ColIndices, info_U, HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer)); /************************************/ /* STEP 4: FACTORIZATION: A = L * U */ /************************************/ int numerical_zero; cusparseSafeCall(hipsparseDcsrilu02(handle, N, nnzC, descrC, d_C, d_C_RowIndices, d_C_ColIndices, info_C, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer)); status = hipsparseXcsrilu02_zeroPivot(handle, info_C, &numerical_zero); if (HIPSPARSE_STATUS_ZERO_PIVOT == status){ printf("U(%d,%d) is zero\n", numerical_zero, numerical_zero); } /*********************/ /* STEP 5: L * z = x */ /*********************/ // --- Allocating the intermediate result vector double *d_z_dense; gpuErrchk(hipMalloc(&d_z_dense, N * sizeof(double))); const double alpha = 1.; cusparseSafeCall(hipsparseDcsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, &alpha, descr_L, d_C, d_C_RowIndices, d_C_ColIndices, info_L, d_x_dense, d_z_dense, HIPSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer)); /*********************/ /* STEP 5: U * y = z */ /*********************/ gpuErrchk(hipMalloc(&d_y_dense[0], N * sizeof(double))); cusparseSafeCall(hipsparseDcsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, &alpha, descr_U, d_C, d_C_RowIndices, d_C_ColIndices, info_U, d_z_dense, d_y_dense[0], HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer)); double *h_y_dense = (double *)malloc(N * sizeof(double)); gpuErrchk(hipMemcpy(h_y_dense, d_y_dense[0], N * sizeof(double), hipMemcpyDeviceToHost)); printf("\n\nFinal result\n"); for (int k=0; k<N; k++) printf("x[%i] = %f\n", k, h_y_dense[k]); } /********/ /* MAIN */ /********/ int main() { // --- Initialize cuSPARSE hipsparseHandle_t handle; cusparseSafeCall(hipsparseCreate(&handle)); /*************************************************/ /* SETTING UP THE ORIGINAL LINEAR SYSTEM PROBLEM */ /*************************************************/ 
const int N = 4; // --- Number of rows and columns double *h_A_dense; double *h_x_dense; double *d_A_dense; double *d_x_dense; setUpTheProblem(&h_A_dense, &h_x_dense, &d_A_dense, &d_x_dense, N); /************************/ /* FROM DENSE TO SPARSE */ /************************/ //--- Descriptor for sparse matrix A hipsparseMatDescr_t descrA; int *d_A_RowIndices, *d_A_ColIndices; double *d_A; int nnzA; fromDenseToSparse(handle, d_A_dense, &d_A, &d_A_RowIndices, &d_A_ColIndices, &nnzA, &descrA, N); /******************/ /* GRAPH COLORING */ /******************/ const double fractionToColor = 0.95; int *d_B_RowIndices, *d_B_ColIndices; double *d_B; int nnzB; hipsparseMatDescr_t descrB; graphColoring(handle, nnzB, descrA, fractionToColor, d_A, d_A_RowIndices, d_A_ColIndices, &d_B, &d_B_RowIndices, &d_B_ColIndices, &descrB, N); /*************************/ /* MATRIX ROW REORDERING */ /*************************/ int nnzC; int *d_C_RowIndices, *d_C_ColIndices; double *d_C; hipsparseMatDescr_t descrC; matrixRowReordering(handle, nnzA, nnzB, &nnzC, descrA, descrB, &descrC, d_A, d_A_RowIndices, d_A_ColIndices, d_B, d_B_RowIndices, d_B_ColIndices, &d_C, &d_C_RowIndices, &d_C_ColIndices, N); /******************/ /* ROW REORDERING */ /******************/ double *d_y_dense; rowReordering(handle, nnzA, descrB, d_B, d_B_RowIndices, d_B_ColIndices, d_x_dense, &d_y_dense, N); /*****************************/ /* SOLVING THE LINEAR SYSTEM */ /*****************************/ double *d_xsol_dense; LUDecomposition(handle, nnzC, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_y_dense, &d_xsol_dense, N); }
b208fac484bd7b2159cd4bbac45dcf4fadd91f2b.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <assert.h> #include "Utilities.cuh" #include <cuda_runtime.h> #include <cusparse_v2.h> #define BLOCKSIZE 256 /**************************/ /* SETTING UP THE PROBLEM */ /**************************/ void setUpTheProblem(double **h_A_dense, double **h_x_dense, double **d_A_dense, double **d_x_dense, const int N) { // --- Host side dense matrix h_A_dense[0] = (double*)calloc(N * N, sizeof(*h_A_dense)); // --- Column-major ordering h_A_dense[0][0] = 0.4612f; h_A_dense[0][4] = -0.0006f; h_A_dense[0][8] = 0.f; h_A_dense[0][12] = 0.0f; h_A_dense[0][1] = -0.0006f; h_A_dense[0][5] = 0.f; h_A_dense[0][9] = 0.0723f; h_A_dense[0][13] = 0.04f; h_A_dense[0][2] = 0.3566f; h_A_dense[0][6] = 0.0723f; h_A_dense[0][10] = 0.f; h_A_dense[0][14] = 0.0f; h_A_dense[0][3] = 0.0f; h_A_dense[0][7] = 0.0f; h_A_dense[0][11] = 1.0f; h_A_dense[0][15] = 0.1f; h_x_dense[0] = (double *)malloc(N * sizeof(double)); h_x_dense[0][0] = 100.0; h_x_dense[0][1] = 200.0; h_x_dense[0][2] = 400.0; h_x_dense[0][3] = 500.0; // --- Create device arrays and copy host arrays to them gpuErrchk(cudaMalloc(&d_A_dense[0], N * N * sizeof(double))); gpuErrchk(cudaMemcpy(d_A_dense[0], h_A_dense[0], N * N * sizeof(double), cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc(&d_x_dense[0], N * sizeof(double))); gpuErrchk(cudaMemcpy(d_x_dense[0], h_x_dense[0], N * sizeof(double), cudaMemcpyHostToDevice)); } /************************/ /* FROM DENSE TO SPARSE */ /************************/ void fromDenseToSparse(const cusparseHandle_t handle, double *d_A_dense, double **d_A, int **d_A_RowIndices, int **d_A_ColIndices, int *nnz, cusparseMatDescr_t *descrA, const int N) { cusparseSafeCall(cusparseCreateMatDescr(&descrA[0])); cusparseSafeCall(cusparseSetMatType (descrA[0], CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrA[0], CUSPARSE_INDEX_BASE_ZERO)); nnz[0] = 0; // --- Number of nonzero elements in dense matrix const int lda = N; // --- 
Leading dimension of dense matrix // --- Device side number of nonzero elements per row int *d_nnzPerVector; gpuErrchk(cudaMalloc(&d_nnzPerVector, N * sizeof(int))); cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, N, N, descrA[0], d_A_dense, lda, d_nnzPerVector, &nnz[0])); // --- Host side number of nonzero elements per row int *h_nnzPerVector = (int *)malloc(N * sizeof(int)); gpuErrchk(cudaMemcpy(h_nnzPerVector, d_nnzPerVector, N * sizeof(int), cudaMemcpyDeviceToHost)); printf("Number of nonzero elements in dense matrix = %i\n\n", nnz[0]); for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i = %i \n", i, h_nnzPerVector[i]); printf("\n"); // --- Device side sparse matrix gpuErrchk(cudaMalloc(&d_A[0], nnz[0] * sizeof(double))); gpuErrchk(cudaMalloc(&d_A_RowIndices[0], (N + 1) * sizeof(int))); gpuErrchk(cudaMalloc(&d_A_ColIndices[0], nnz[0] * sizeof(int))); cusparseSafeCall(cusparseDdense2csr(handle, N, N, descrA[0], d_A_dense, lda, d_nnzPerVector, d_A[0], d_A_RowIndices[0], d_A_ColIndices[0])); // --- Host side sparse matrix double *h_A = (double *)malloc(nnz[0] * sizeof(double)); int *h_A_RowIndices = (int *)malloc((N + 1) * sizeof(*h_A_RowIndices)); int *h_A_ColIndices = (int *)malloc(nnz[0] * sizeof(*h_A_ColIndices)); gpuErrchk(cudaMemcpy(h_A, d_A[0], nnz[0] * sizeof(double), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices[0], (N + 1) * sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices[0], nnz[0] * sizeof(int), cudaMemcpyDeviceToHost)); printf("\nOriginal matrix in CSR format\n\n"); for (int i = 0; i < nnz[0]; ++i) printf("A[%i] = %f ", i, h_A[i]); printf("\n"); printf("\n"); for (int i = 0; i < (N + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n"); for (int i = 0; i < nnz[0]; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]); } /******************/ /* GRAPH COLORING */ /******************/ __global__ void 
setRowIndices(int *d_B_RowIndices, const int N) { const int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid == N) d_B_RowIndices[tid] = N; else if (tid < N) d_B_RowIndices[tid] = tid; } __global__ void setB(double *d_B, const int N) { const int tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < N) d_B[tid] = 1.f; } void graphColoring(const cusparseHandle_t handle, const int nnz, const cusparseMatDescr_t descrA, const double fractionToColor, double *d_A, const int *d_A_RowIndices, const int *d_A_ColIndices, double **d_B, int **d_B_RowIndices, int **d_B_ColIndices, cusparseMatDescr_t *descrB, const int N) { cusparseColorInfo_t info; cusparseSafeCall(cusparseCreateColorInfo(&info)); int ncolors; int *d_coloring; gpuErrchk(cudaMalloc(&d_coloring, N * sizeof(double))); gpuErrchk(cudaMalloc(&d_B_ColIndices[0], N * sizeof(double))); cusparseSafeCall(cusparseDcsrcolor(handle, N, nnz, descrA, d_A, d_A_RowIndices, d_A_ColIndices, &fractionToColor, &ncolors, d_coloring, d_B_ColIndices[0], info)); int *h_coloring = (int *)malloc(N * sizeof(double)); int *h_B_ColIndices = (int *)malloc(N * sizeof(double)); gpuErrchk(cudaMemcpy(h_coloring, d_coloring, N * sizeof(double), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_B_ColIndices, d_B_ColIndices[0], N * sizeof(double), cudaMemcpyDeviceToHost)); for (int i = 0; i < N; i++) printf("h_coloring = %i; h_B_ColIndices = %i\n", h_coloring[i], h_B_ColIndices[i]); gpuErrchk(cudaMalloc(&d_B_RowIndices[0], (N + 1) * sizeof(int))); int *h_B_RowIndices = (int *)malloc((N + 1) * sizeof(double)); setRowIndices<<<iDivUp(N + 1, BLOCKSIZE), BLOCKSIZE>>>(d_B_RowIndices[0], N); gpuErrchk(cudaMemcpy(h_B_RowIndices, d_B_RowIndices[0], (N + 1) * sizeof(int), cudaMemcpyDeviceToHost)); printf("\n"); for (int i = 0; i <= N; i++) printf("h_B_RowIndices = %i\n", h_B_RowIndices[i]); gpuErrchk(cudaMalloc(&d_B[0], N * sizeof(double))); double *h_B = (double *)malloc(N * sizeof(double)); setB<<<iDivUp(N, BLOCKSIZE), BLOCKSIZE>>>(d_B[0], N); 
gpuErrchk(cudaMemcpy(h_B, d_B[0], N * sizeof(double), cudaMemcpyDeviceToHost)); printf("\n"); for (int i = 0; i < N; i++) printf("h_B = %f\n", h_B[i]); // --- Descriptor for sparse mutation matrix B cusparseSafeCall(cusparseCreateMatDescr(&descrB[0])); cusparseSafeCall(cusparseSetMatType (descrB[0], CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrB[0], CUSPARSE_INDEX_BASE_ZERO)); } /*************************/ /* MATRIX ROW REORDERING */ /*************************/ void matrixRowReordering(const cusparseHandle_t handle, int nnzA, int nnzB, int *nnzC, cusparseMatDescr_t descrA, cusparseMatDescr_t descrB, cusparseMatDescr_t *descrC, double *d_A, int *d_A_RowIndices, int *d_A_ColIndices, double *d_B, int *d_B_RowIndices, int *d_B_ColIndices, double **d_C, int **d_C_RowIndices, int **d_C_ColIndices, const int N) { // --- Descriptor for sparse matrix C cusparseSafeCall(cusparseCreateMatDescr(&descrC[0])); cusparseSafeCall(cusparseSetMatType (descrC[0], CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatIndexBase(descrC[0], CUSPARSE_INDEX_BASE_ZERO)); const int lda = N; // --- Leading dimension of dense matrix // --- Device side sparse matrix gpuErrchk(cudaMalloc(&d_C_RowIndices[0], (N + 1) * sizeof(int))); // --- Host side sparse matrices int *h_C_RowIndices = (int *)malloc((N + 1) * sizeof(int)); // --- Performing the matrix - matrix multiplication int baseC; int *nnzTotalDevHostPtr = &nnzC[0]; cusparseSafeCall(cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST)); cusparseSafeCall(cusparseXcsrgemmNnz(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrB, nnzB, d_B_RowIndices, d_B_ColIndices, descrA, nnzA, d_A_RowIndices, d_A_ColIndices, descrC[0], d_C_RowIndices[0], nnzTotalDevHostPtr)); if (NULL != nnzTotalDevHostPtr) nnzC[0] = *nnzTotalDevHostPtr; else { gpuErrchk(cudaMemcpy(&nnzC[0], d_C_RowIndices + N, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&baseC, 
d_C_RowIndices, sizeof(int), cudaMemcpyDeviceToHost)); nnzC -= baseC; } gpuErrchk(cudaMalloc(&d_C_ColIndices[0], nnzC[0] * sizeof(int))); gpuErrchk(cudaMalloc(&d_C[0], nnzC[0] * sizeof(double))); double *h_C = (double *)malloc(nnzC[0] * sizeof(double)); int *h_C_ColIndices = (int *)malloc(nnzC[0] * sizeof(int)); cusparseSafeCall(cusparseDcsrgemm(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, N, descrB, nnzB, d_B, d_B_RowIndices, d_B_ColIndices, descrA, nnzA, d_A, d_A_RowIndices, d_A_ColIndices, descrC[0], d_C[0], d_C_RowIndices[0], d_C_ColIndices[0])); double *h_C_dense = (double*)malloc(N * N * sizeof(double)); double *d_C_dense; gpuErrchk(cudaMalloc(&d_C_dense, N * N * sizeof(double))); cusparseSafeCall(cusparseDcsr2dense(handle, N, N, descrC[0], d_C[0], d_C_RowIndices[0], d_C_ColIndices[0], d_C_dense, N)); gpuErrchk(cudaMemcpy(h_C , d_C[0], nnzC[0] * sizeof(double), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_C_RowIndices, d_C_RowIndices[0], (N + 1) * sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(h_C_ColIndices, d_C_ColIndices[0], nnzC[0] * sizeof(int), cudaMemcpyDeviceToHost)); printf("\nResult matrix C in CSR format\n\n"); for (int i = 0; i < nnzC[0]; ++i) printf("C[%i] = %f ", i, h_C[i]); printf("\n"); printf("\n"); for (int i = 0; i < (N + 1); ++i) printf("h_C_RowIndices[%i] = %i \n", i, h_C_RowIndices[i]); printf("\n"); printf("\n"); for (int i = 0; i < nnzC[0]; ++i) printf("h_C_ColIndices[%i] = %i \n", i, h_C_ColIndices[i]); gpuErrchk(cudaMemcpy(h_C_dense, d_C_dense, N * N * sizeof(double), cudaMemcpyDeviceToHost)); for (int j = 0; j < N; j++) { for (int i = 0; i < N; i++) printf("%f \t", h_C_dense[i * N + j]); printf("\n"); } } /******************/ /* ROW REORDERING */ /******************/ void rowReordering(const cusparseHandle_t handle, int nnzA, cusparseMatDescr_t descrB, double *d_B, int *d_B_RowIndices, int *d_B_ColIndices, double *d_x_dense, double **d_y_dense, const int N) { 
gpuErrchk(cudaMalloc(&d_y_dense[0], N * sizeof(double))); const double alpha = 1.; const double beta = 0.; cusparseSafeCall(cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nnzA, &alpha, descrB, d_B, d_B_RowIndices, d_B_ColIndices, d_x_dense, &beta, d_y_dense[0])); double *h_y_dense = (double*)malloc(N * sizeof(double)); gpuErrchk(cudaMemcpy(h_y_dense, d_y_dense[0], N * sizeof(double), cudaMemcpyDeviceToHost)); printf("\nResult vector\n\n"); for (int i = 0; i < N; ++i) printf("h_y[%i] = %f ", i, h_y_dense[i]); printf("\n"); } /*****************************/ /* SOLVING THE LINEAR SYSTEM */ /*****************************/ void LUDecomposition(const cusparseHandle_t handle, int nnzC, cusparseMatDescr_t descrC, double *d_C, int *d_C_RowIndices, int *d_C_ColIndices, double *d_x_dense, double **d_y_dense, const int N) { /******************************************/ /* STEP 1: CREATE DESCRIPTORS FOR L AND U */ /******************************************/ cusparseMatDescr_t descr_L = 0; cusparseSafeCall(cusparseCreateMatDescr (&descr_L)); cusparseSafeCall(cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ZERO)); cusparseSafeCall(cusparseSetMatType (descr_L, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatFillMode (descr_L, CUSPARSE_FILL_MODE_LOWER)); cusparseSafeCall(cusparseSetMatDiagType (descr_L, CUSPARSE_DIAG_TYPE_UNIT)); cusparseMatDescr_t descr_U = 0; cusparseSafeCall(cusparseCreateMatDescr (&descr_U)); cusparseSafeCall(cusparseSetMatIndexBase(descr_U, CUSPARSE_INDEX_BASE_ZERO)); cusparseSafeCall(cusparseSetMatType (descr_U, CUSPARSE_MATRIX_TYPE_GENERAL)); cusparseSafeCall(cusparseSetMatFillMode (descr_U, CUSPARSE_FILL_MODE_UPPER)); cusparseSafeCall(cusparseSetMatDiagType (descr_U, CUSPARSE_DIAG_TYPE_NON_UNIT)); /**************************************************************************************************/ /* STEP 2: QUERY HOW MUCH MEMORY USED IN LU FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */ 
/**************************************************************************************************/ csrilu02Info_t info_C = 0; cusparseSafeCall(cusparseCreateCsrilu02Info (&info_C)); csrsv2Info_t info_L = 0; cusparseSafeCall(cusparseCreateCsrsv2Info (&info_L)); csrsv2Info_t info_U = 0; cusparseSafeCall(cusparseCreateCsrsv2Info (&info_U)); int pBufferSize_M, pBufferSize_L, pBufferSize_U; cusparseSafeCall(cusparseDcsrilu02_bufferSize(handle, N, nnzC, descrC, d_C, d_C_RowIndices, d_C_ColIndices, info_C, &pBufferSize_M)); cusparseSafeCall(cusparseDcsrsv2_bufferSize (handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, descr_L, d_C, d_C_RowIndices, d_C_ColIndices, info_L, &pBufferSize_L)); cusparseSafeCall(cusparseDcsrsv2_bufferSize (handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, descr_U, d_C, d_C_RowIndices, d_C_ColIndices, info_U, &pBufferSize_U)); int pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_U)); void *pBuffer = 0; gpuErrchk(cudaMalloc((void**)&pBuffer, pBufferSize)); /************************************************************************************************/ /* STEP 3: ANALYZE THE THREE PROBLEMS: LU FACTORIZATION AND THE TWO FOLLOWING SYSTEM INVERSIONS */ /************************************************************************************************/ int structural_zero; cusparseSafeCall(cusparseDcsrilu02_analysis(handle, N, nnzC, descrC, d_C, d_C_RowIndices, d_C_ColIndices, info_C, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer)); cusparseStatus_t status = cusparseXcsrilu02_zeroPivot(handle, info_C, &structural_zero); if (CUSPARSE_STATUS_ZERO_PIVOT == status){ printf("A(%d,%d) is missing\n", structural_zero, structural_zero); } cusparseSafeCall(cusparseDcsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, descr_L, d_C, d_C_RowIndices, d_C_ColIndices, info_L, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer)); cusparseSafeCall(cusparseDcsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, descr_U, d_C, d_C_RowIndices, 
d_C_ColIndices, info_U, CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer)); /************************************/ /* STEP 4: FACTORIZATION: A = L * U */ /************************************/ int numerical_zero; cusparseSafeCall(cusparseDcsrilu02(handle, N, nnzC, descrC, d_C, d_C_RowIndices, d_C_ColIndices, info_C, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer)); status = cusparseXcsrilu02_zeroPivot(handle, info_C, &numerical_zero); if (CUSPARSE_STATUS_ZERO_PIVOT == status){ printf("U(%d,%d) is zero\n", numerical_zero, numerical_zero); } /*********************/ /* STEP 5: L * z = x */ /*********************/ // --- Allocating the intermediate result vector double *d_z_dense; gpuErrchk(cudaMalloc(&d_z_dense, N * sizeof(double))); const double alpha = 1.; cusparseSafeCall(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, &alpha, descr_L, d_C, d_C_RowIndices, d_C_ColIndices, info_L, d_x_dense, d_z_dense, CUSPARSE_SOLVE_POLICY_NO_LEVEL, pBuffer)); /*********************/ /* STEP 5: U * y = z */ /*********************/ gpuErrchk(cudaMalloc(&d_y_dense[0], N * sizeof(double))); cusparseSafeCall(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, nnzC, &alpha, descr_U, d_C, d_C_RowIndices, d_C_ColIndices, info_U, d_z_dense, d_y_dense[0], CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer)); double *h_y_dense = (double *)malloc(N * sizeof(double)); gpuErrchk(cudaMemcpy(h_y_dense, d_y_dense[0], N * sizeof(double), cudaMemcpyDeviceToHost)); printf("\n\nFinal result\n"); for (int k=0; k<N; k++) printf("x[%i] = %f\n", k, h_y_dense[k]); } /********/ /* MAIN */ /********/ int main() { // --- Initialize cuSPARSE cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle)); /*************************************************/ /* SETTING UP THE ORIGINAL LINEAR SYSTEM PROBLEM */ /*************************************************/ const int N = 4; // --- Number of rows and columns double *h_A_dense; double *h_x_dense; double *d_A_dense; double *d_x_dense; 
setUpTheProblem(&h_A_dense, &h_x_dense, &d_A_dense, &d_x_dense, N); /************************/ /* FROM DENSE TO SPARSE */ /************************/ //--- Descriptor for sparse matrix A cusparseMatDescr_t descrA; int *d_A_RowIndices, *d_A_ColIndices; double *d_A; int nnzA; fromDenseToSparse(handle, d_A_dense, &d_A, &d_A_RowIndices, &d_A_ColIndices, &nnzA, &descrA, N); /******************/ /* GRAPH COLORING */ /******************/ const double fractionToColor = 0.95; int *d_B_RowIndices, *d_B_ColIndices; double *d_B; int nnzB; cusparseMatDescr_t descrB; graphColoring(handle, nnzB, descrA, fractionToColor, d_A, d_A_RowIndices, d_A_ColIndices, &d_B, &d_B_RowIndices, &d_B_ColIndices, &descrB, N); /*************************/ /* MATRIX ROW REORDERING */ /*************************/ int nnzC; int *d_C_RowIndices, *d_C_ColIndices; double *d_C; cusparseMatDescr_t descrC; matrixRowReordering(handle, nnzA, nnzB, &nnzC, descrA, descrB, &descrC, d_A, d_A_RowIndices, d_A_ColIndices, d_B, d_B_RowIndices, d_B_ColIndices, &d_C, &d_C_RowIndices, &d_C_ColIndices, N); /******************/ /* ROW REORDERING */ /******************/ double *d_y_dense; rowReordering(handle, nnzA, descrB, d_B, d_B_RowIndices, d_B_ColIndices, d_x_dense, &d_y_dense, N); /*****************************/ /* SOLVING THE LINEAR SYSTEM */ /*****************************/ double *d_xsol_dense; LUDecomposition(handle, nnzC, descrC, d_C, d_C_RowIndices, d_C_ColIndices, d_y_dense, &d_xsol_dense, N); }
b77a6e9f92712ec702d31b391f50a52484be3b19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <iostream> #include "utils.h" #include "cuda_error_check.cuh" #include "initial_graph.hpp" #include "parse_graph.hpp" #define TRUE 1 #define FALSE 0 /* Edge process out of core with shared memory */ __global__ void edge_process_out_of_core_shared_memory(unsigned int edges_length, unsigned int *src, unsigned int *dest, unsigned int *weight, unsigned int *distance_prev, unsigned int *distance_cur, int *noChange, int *is_distance_infinity_prev, int *is_distance_infinity_cur) { extern __shared__ unsigned int s_data[ ]; extern __shared__ unsigned int dest_s_data[ ]; extern __shared__ unsigned int is_dest_valid[ ]; // unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; // unsigned int thread_num = blockDim.x * gridDim.x; // // unsigned int iter = edges_length % thread_num == 0 ? edges_length / thread_num : edges_length / thread_num + 1; // unsigned int lane = thread_id % 32; // // s_data[threadIdx.x] = -1; // is_dest_valid[threadIdx.x] = FALSE; // dest_s_data[threadIdx.x] = -1; // // __syncthreads(); unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; unsigned int thread_num = blockDim.x * gridDim.x; unsigned int warp_id = thread_id / 32; unsigned int warp_num = thread_num % 32 == 0 ? thread_num / 32 : thread_num / 32 + 1; unsigned int load = edges_length % warp_num == 0 ? 
edges_length / warp_num : edges_length / warp_num + 1; unsigned int beg = load * warp_id; unsigned int end = min(edges_length, beg + load); unsigned int lane = thread_id % 32; beg += lane; s_data[threadIdx.x] = -1; is_dest_valid[threadIdx.x] = FALSE; dest_s_data[threadIdx.x] = -1; __syncthreads(); for (unsigned int i = beg; i < end; i += 32) { // unsigned int i; // for (i = 0; i < iter; i++) { // unsigned int dataid = thread_id + i * thread_num; // lane = dataid % 32; // // if (dataid >= edges_length) // break; // unsigned int u = src[dataid]; // unsigned int v = dest[dataid]; // unsigned int w = weight[dataid]; unsigned int u = src[i]; unsigned int v = dest[i]; unsigned int w = weight[i]; //printf("src %u | dest %u | weight %u | dataid %u | lane %u\n", u, v, w, dataid, lane); dest_s_data[threadIdx.x] = v; is_dest_valid[threadIdx.x] = TRUE; if (is_distance_infinity_prev[u] == TRUE) { s_data[threadIdx.x] = -1; } else { s_data[threadIdx.x] = min(distance_cur[v], distance_prev[u] + w); } //printf("%u %u %u | s_data at %u is %u, lane %u, i %u\n", u, v, w, threadIdx.x, s_data[threadIdx.x], lane, i); __syncthreads(); // segmented scan to find minimum if (lane >= 1 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-1] && is_dest_valid[threadIdx.x-1] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-1]); if (lane >= 2 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-2] && is_dest_valid[threadIdx.x-2] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-2]); if (lane >= 4 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-4] && is_dest_valid[threadIdx.x-4] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-4]); if (lane >= 8 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-8] && is_dest_valid[threadIdx.x-8] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-8]); if (lane >= 16 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-16] && 
is_dest_valid[threadIdx.x-16] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-16]); __syncthreads(); // i is in bounds if (i + 1 < edges_length) { //printf("inside 1\n"); // this thread is the last thread for the segment, so it holds the min // this thread is the last in a block if (threadIdx.x == blockDim.x - 1 || dest_s_data[threadIdx.x] != dest_s_data[threadIdx.x+1] || is_dest_valid[threadIdx.x+1] == FALSE) { //printf("inside 4\n"); //printf("the min for dest %u is %u\n", dest[dataid], s_data[threadIdx.x]); int old_distance = atomicMin(&distance_cur[v], s_data[threadIdx.x]); if (distance_cur[v] != -1) atomicMin(&is_distance_infinity_cur[v], FALSE); // test for a change! if (old_distance != distance_cur[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } //printf("inside no crashes\n"); } // i is the last element else { //printf("the min for dest %u is %u\n", dest[i], s_data[threadIdx.x]); int old_distance = atomicMin(&distance_cur[v], s_data[threadIdx.x]); if (distance_cur[v] != -1) atomicMin(&is_distance_infinity_cur[v], FALSE); // test for a change! if (old_distance != distance_cur[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } } } /* Edge process out of core with no shared memory */ __global__ void edge_process_out_of_core_non_warp_partitioned(unsigned int edges_length, unsigned int *src, unsigned int *dest, unsigned int *weight, unsigned int *distance_prev, unsigned int *distance_cur, int *noChange, int *is_distance_infinity_prev, int *is_distance_infinity_cur) { unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; unsigned int thread_num = blockDim.x * gridDim.x; unsigned int iter = edges_length % thread_num == 0 ? 
edges_length / thread_num : edges_length / thread_num + 1; unsigned int i; for (i = 0; i < iter; i++) { unsigned int dataid = thread_id + i * thread_num; if (dataid >= edges_length) break; unsigned int u = src[dataid]; unsigned int v = dest[dataid]; unsigned int w = weight[dataid]; if (is_distance_infinity_prev[u] == TRUE) { continue; } //printf("%u isn't infinite distance\n", u); if (distance_prev[u] + w < distance_prev[v]) { // relax //printf("%u %u\n", distance_cur[v], distance_prev[u] + w); unsigned int old_distance = atomicMin(&distance_cur[v], distance_prev[u] + w); atomicMin(&is_distance_infinity_cur[v], FALSE); //printf("%u %u %u %d\n", old_distance, distance_cur[v], distance_prev[u] + w, is_distance_infinity[v]); // test for a change! if (old_distance != distance_cur[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } } } /* Edge process out of core with no shared memory */ /* This has been found to be 20% slower than it's alternative */ __global__ void edge_process_out_of_core_warp_sided(unsigned int edges_length, unsigned int *src, unsigned int *dest, unsigned int *weight, unsigned int *distance_prev, unsigned int *distance_cur, int *noChange, int *is_distance_infinity_prev, int *is_distance_infinity_cur) { unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; unsigned int thread_num = blockDim.x * gridDim.x; unsigned int warp_id = thread_id / 32; unsigned int warp_num = thread_num % 32 == 0 ? thread_num / 32 : thread_num / 32 + 1; unsigned int load = edges_length % warp_num == 0 ? 
edges_length / warp_num : edges_length / warp_num + 1; unsigned int beg = load * warp_id; unsigned int end = min(edges_length, beg + load); unsigned int lane = thread_id % 32; beg += lane; for (unsigned int i = beg; i < end; i += 32) { unsigned int u = src[i]; unsigned int v = dest[i]; unsigned int w = weight[i]; if (is_distance_infinity_prev[u] == TRUE) { continue; } //printf("%u isn't infinite distance\n", u); if (distance_prev[u] + w < distance_prev[v]) { // relax //printf("%u %u\n", distance_cur[v], distance_prev[u] + w); unsigned int old_distance = atomicMin(&distance_cur[v], distance_prev[u] + w); atomicMin(&is_distance_infinity_cur[v], FALSE); //printf("%u %u %u %d\n", old_distance, distance_cur[v], distance_prev[u] + w); // test for a change! if (old_distance != distance_cur[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } } } /* Edge process in core */ __global__ void edge_process_in_core(unsigned int edges_length, unsigned int vertices_length, unsigned int *src, unsigned int *dest, unsigned int *weight, unsigned int *distance, int *noChange, int *is_distance_infinity) { unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; unsigned int thread_num = blockDim.x * gridDim.x; unsigned int warp_id = thread_id / 32; unsigned int warp_num = thread_num % 32 == 0 ? thread_num / 32 : thread_num / 32 + 1; unsigned int load = edges_length % warp_num == 0 ? 
edges_length / warp_num : edges_length / warp_num + 1; unsigned int beg = load * warp_id; unsigned int end = min(edges_length, beg + load); unsigned int lane = thread_id % 32; beg += lane; for (unsigned int i = beg; i < end; i += 32) { unsigned int u = src[i]; unsigned int v = dest[i]; unsigned int w = weight[i]; if (is_distance_infinity[u] == TRUE) { continue; } unsigned int temp_dist = distance[u] + w; if (distance[u] == -1) { continue; } if (temp_dist < distance[v]) { // relax //printf("%u %u\n", distance[v], temp_dist); int old_distance = atomicMin(&distance[v], temp_dist); atomicMin(&is_distance_infinity[v], FALSE); //printf("%u %u %u %d\n", old_distance, distance_cur[v], distance_prev[u] + w, is_distance_infinity[v]); // test for a change! if (old_distance != distance[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } } } void puller(int blockSize, int blockNum, int sync, int smem, unsigned int *distance_cur, unsigned int *edges_src, unsigned int *edges_dest, unsigned int *edges_weight, unsigned int edges_length, unsigned int vertices_length){ /* Will use these arrays instead of a vector * edges_src : array of all edges (indexed 0 to n) where the value is the vertex source index of the edge (since edges are directed) * edges_dest : same as above, except it tells the vertex destination index * edges_weight : same as above, except it tells the edge's weight * distance_prev : array of all vertices with their distance values * distance_cur : same as above */ /* Allocate here... 
*/ unsigned int *distance_prev = (unsigned int *) malloc(vertices_length * sizeof(unsigned int)); int *noChange = (int *) malloc(sizeof(int)); int *is_distance_infinity = (int *) malloc(vertices_length * sizeof(int)); *noChange = TRUE; unsigned int *cuda_edges_src, *cuda_edges_dest, *cuda_edges_weight; unsigned int *cuda_distance_prev, *cuda_distance_cur; int *cuda_noChange, *cuda_is_distance_infinity_prev, *cuda_is_distance_infinity_cur; // the distance to the first vertex is always 0 distance_prev[0] = 0; distance_cur[0] = 0; is_distance_infinity[0] = FALSE; // setting an unsigned int to -1 will set it to the maximum value! for (int i = 1; i < vertices_length; i++) { distance_prev[i] = -1; distance_cur[i] = -1; is_distance_infinity[i] = TRUE; } hipMalloc((void **)&cuda_edges_src, edges_length * sizeof(unsigned int)); hipMalloc((void **)&cuda_edges_dest, edges_length * sizeof(unsigned int)); hipMalloc((void **)&cuda_edges_weight, edges_length * sizeof(unsigned int)); hipMalloc((void **)&cuda_distance_prev, vertices_length * sizeof(unsigned int)); hipMalloc((void **)&cuda_distance_cur, vertices_length * sizeof(unsigned int)); hipMalloc((void **)&cuda_noChange, sizeof(int)); hipMalloc((void **)&cuda_is_distance_infinity_prev, vertices_length * sizeof(int)); hipMalloc((void **)&cuda_is_distance_infinity_cur, vertices_length * sizeof(int)); hipMemcpy(cuda_edges_src, edges_src, edges_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_edges_dest, edges_dest, edges_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_edges_weight, edges_weight, edges_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_distance_prev, distance_prev, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_distance_cur, distance_cur, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_noChange, noChange, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_is_distance_infinity_prev, 
is_distance_infinity, vertices_length * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_is_distance_infinity_cur, is_distance_infinity, vertices_length * sizeof(int), hipMemcpyHostToDevice); setTime(); /* * Do all the things here! **/ // sync is out of core if (sync == 0) { // no shared memory if (smem == 0) { for (unsigned int i = 1; i < vertices_length; i++) { //printf("pass %u\n", i); hipLaunchKernelGGL(( edge_process_out_of_core_non_warp_partitioned), dim3(blockNum), dim3(blockSize), 0, 0, edges_length, cuda_edges_src, cuda_edges_dest, cuda_edges_weight, cuda_distance_prev, cuda_distance_cur, cuda_noChange, cuda_is_distance_infinity_prev, cuda_is_distance_infinity_cur); hipMemcpy(noChange, cuda_noChange, sizeof(int), hipMemcpyDeviceToHost); if (*noChange == TRUE) break; *noChange = TRUE; hipMemcpy(cuda_noChange, noChange, sizeof(int), hipMemcpyHostToDevice); // get current distance and copy it to both cuda_distance_prev and cuda_distance_cur hipMemcpy(distance_cur, cuda_distance_cur, vertices_length * sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(cuda_distance_prev, distance_cur, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_distance_cur, distance_cur, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(is_distance_infinity, cuda_is_distance_infinity_cur, vertices_length * sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(cuda_is_distance_infinity_prev, is_distance_infinity, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_is_distance_infinity_cur, is_distance_infinity, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); } } // shared memory else if (smem == 1) { for (unsigned int i = 1; i < vertices_length; i++) { //printf("pass %d\n", i); hipLaunchKernelGGL(( edge_process_out_of_core_shared_memory), dim3(blockNum), dim3(blockSize), blockSize * sizeof(unsigned int), 0, edges_length, cuda_edges_src, cuda_edges_dest, cuda_edges_weight, 
cuda_distance_prev, cuda_distance_cur, cuda_noChange, cuda_is_distance_infinity_prev, cuda_is_distance_infinity_cur); hipMemcpy(noChange, cuda_noChange, sizeof(int), hipMemcpyDeviceToHost); if (*noChange == TRUE) break; *noChange = TRUE; hipMemcpy(cuda_noChange, noChange, sizeof(int), hipMemcpyHostToDevice); // get current distance and copy it to both cuda_distance_prev and cuda_distance_cur hipMemcpy(distance_cur, cuda_distance_cur, vertices_length * sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(cuda_distance_prev, distance_cur, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_distance_cur, distance_cur, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(is_distance_infinity, cuda_is_distance_infinity_cur, vertices_length * sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(cuda_is_distance_infinity_prev, is_distance_infinity, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(cuda_is_distance_infinity_cur, is_distance_infinity, vertices_length * sizeof(unsigned int), hipMemcpyHostToDevice); } } // no shared memory else { printf("No shared memory\n"); exit(1); } } // sync is in core else if (sync == 1) { for (unsigned int i = 1; i < vertices_length; i++) { hipLaunchKernelGGL(( edge_process_in_core), dim3(blockNum), dim3(blockSize), 0, 0, edges_length, vertices_length, cuda_edges_src, cuda_edges_dest, cuda_edges_weight, cuda_distance_cur, cuda_noChange, cuda_is_distance_infinity_prev); hipMemcpy(noChange, cuda_noChange, sizeof(int), hipMemcpyDeviceToHost); if (*noChange == TRUE) break; *noChange = TRUE; hipMemcpy(cuda_noChange, noChange, sizeof(int), hipMemcpyHostToDevice); } } else { // no syncing printf("No syncing tag\n"); exit(1); } hipDeviceSynchronize(); std::cout << "Took " << getTime() << "ms.\n"; hipMemcpy(distance_cur, cuda_distance_cur, vertices_length * sizeof(unsigned int), hipMemcpyDeviceToHost); /* Deallocate. 
*/ hipFree(cuda_edges_src); hipFree(cuda_edges_dest); hipFree(cuda_edges_weight); hipFree(cuda_distance_prev); hipFree(cuda_distance_cur); hipFree(cuda_noChange); hipFree(cuda_is_distance_infinity_prev); hipFree(cuda_is_distance_infinity_cur); free(distance_prev); free(noChange); free(is_distance_infinity); }
b77a6e9f92712ec702d31b391f50a52484be3b19.cu
#include <vector> #include <iostream> #include "utils.h" #include "cuda_error_check.cuh" #include "initial_graph.hpp" #include "parse_graph.hpp" #define TRUE 1 #define FALSE 0 /* Edge process out of core with shared memory */ __global__ void edge_process_out_of_core_shared_memory(unsigned int edges_length, unsigned int *src, unsigned int *dest, unsigned int *weight, unsigned int *distance_prev, unsigned int *distance_cur, int *noChange, int *is_distance_infinity_prev, int *is_distance_infinity_cur) { extern __shared__ unsigned int s_data[ ]; extern __shared__ unsigned int dest_s_data[ ]; extern __shared__ unsigned int is_dest_valid[ ]; // unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; // unsigned int thread_num = blockDim.x * gridDim.x; // // unsigned int iter = edges_length % thread_num == 0 ? edges_length / thread_num : edges_length / thread_num + 1; // unsigned int lane = thread_id % 32; // // s_data[threadIdx.x] = -1; // is_dest_valid[threadIdx.x] = FALSE; // dest_s_data[threadIdx.x] = -1; // // __syncthreads(); unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; unsigned int thread_num = blockDim.x * gridDim.x; unsigned int warp_id = thread_id / 32; unsigned int warp_num = thread_num % 32 == 0 ? thread_num / 32 : thread_num / 32 + 1; unsigned int load = edges_length % warp_num == 0 ? 
edges_length / warp_num : edges_length / warp_num + 1; unsigned int beg = load * warp_id; unsigned int end = min(edges_length, beg + load); unsigned int lane = thread_id % 32; beg += lane; s_data[threadIdx.x] = -1; is_dest_valid[threadIdx.x] = FALSE; dest_s_data[threadIdx.x] = -1; __syncthreads(); for (unsigned int i = beg; i < end; i += 32) { // unsigned int i; // for (i = 0; i < iter; i++) { // unsigned int dataid = thread_id + i * thread_num; // lane = dataid % 32; // // if (dataid >= edges_length) // break; // unsigned int u = src[dataid]; // unsigned int v = dest[dataid]; // unsigned int w = weight[dataid]; unsigned int u = src[i]; unsigned int v = dest[i]; unsigned int w = weight[i]; //printf("src %u | dest %u | weight %u | dataid %u | lane %u\n", u, v, w, dataid, lane); dest_s_data[threadIdx.x] = v; is_dest_valid[threadIdx.x] = TRUE; if (is_distance_infinity_prev[u] == TRUE) { s_data[threadIdx.x] = -1; } else { s_data[threadIdx.x] = min(distance_cur[v], distance_prev[u] + w); } //printf("%u %u %u | s_data at %u is %u, lane %u, i %u\n", u, v, w, threadIdx.x, s_data[threadIdx.x], lane, i); __syncthreads(); // segmented scan to find minimum if (lane >= 1 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-1] && is_dest_valid[threadIdx.x-1] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-1]); if (lane >= 2 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-2] && is_dest_valid[threadIdx.x-2] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-2]); if (lane >= 4 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-4] && is_dest_valid[threadIdx.x-4] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-4]); if (lane >= 8 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-8] && is_dest_valid[threadIdx.x-8] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-8]); if (lane >= 16 && dest_s_data[threadIdx.x] == dest_s_data[threadIdx.x-16] && 
is_dest_valid[threadIdx.x-16] == TRUE) s_data[threadIdx.x] = min(s_data[threadIdx.x], s_data[threadIdx.x-16]); __syncthreads(); // i is in bounds if (i + 1 < edges_length) { //printf("inside 1\n"); // this thread is the last thread for the segment, so it holds the min // this thread is the last in a block if (threadIdx.x == blockDim.x - 1 || dest_s_data[threadIdx.x] != dest_s_data[threadIdx.x+1] || is_dest_valid[threadIdx.x+1] == FALSE) { //printf("inside 4\n"); //printf("the min for dest %u is %u\n", dest[dataid], s_data[threadIdx.x]); int old_distance = atomicMin(&distance_cur[v], s_data[threadIdx.x]); if (distance_cur[v] != -1) atomicMin(&is_distance_infinity_cur[v], FALSE); // test for a change! if (old_distance != distance_cur[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } //printf("inside no crashes\n"); } // i is the last element else { //printf("the min for dest %u is %u\n", dest[i], s_data[threadIdx.x]); int old_distance = atomicMin(&distance_cur[v], s_data[threadIdx.x]); if (distance_cur[v] != -1) atomicMin(&is_distance_infinity_cur[v], FALSE); // test for a change! if (old_distance != distance_cur[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } } } /* Edge process out of core with no shared memory */ __global__ void edge_process_out_of_core_non_warp_partitioned(unsigned int edges_length, unsigned int *src, unsigned int *dest, unsigned int *weight, unsigned int *distance_prev, unsigned int *distance_cur, int *noChange, int *is_distance_infinity_prev, int *is_distance_infinity_cur) { unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; unsigned int thread_num = blockDim.x * gridDim.x; unsigned int iter = edges_length % thread_num == 0 ? 
edges_length / thread_num : edges_length / thread_num + 1; unsigned int i; for (i = 0; i < iter; i++) { unsigned int dataid = thread_id + i * thread_num; if (dataid >= edges_length) break; unsigned int u = src[dataid]; unsigned int v = dest[dataid]; unsigned int w = weight[dataid]; if (is_distance_infinity_prev[u] == TRUE) { continue; } //printf("%u isn't infinite distance\n", u); if (distance_prev[u] + w < distance_prev[v]) { // relax //printf("%u %u\n", distance_cur[v], distance_prev[u] + w); unsigned int old_distance = atomicMin(&distance_cur[v], distance_prev[u] + w); atomicMin(&is_distance_infinity_cur[v], FALSE); //printf("%u %u %u %d\n", old_distance, distance_cur[v], distance_prev[u] + w, is_distance_infinity[v]); // test for a change! if (old_distance != distance_cur[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } } } /* Edge process out of core with no shared memory */ /* This has been found to be 20% slower than it's alternative */ __global__ void edge_process_out_of_core_warp_sided(unsigned int edges_length, unsigned int *src, unsigned int *dest, unsigned int *weight, unsigned int *distance_prev, unsigned int *distance_cur, int *noChange, int *is_distance_infinity_prev, int *is_distance_infinity_cur) { unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; unsigned int thread_num = blockDim.x * gridDim.x; unsigned int warp_id = thread_id / 32; unsigned int warp_num = thread_num % 32 == 0 ? thread_num / 32 : thread_num / 32 + 1; unsigned int load = edges_length % warp_num == 0 ? 
edges_length / warp_num : edges_length / warp_num + 1; unsigned int beg = load * warp_id; unsigned int end = min(edges_length, beg + load); unsigned int lane = thread_id % 32; beg += lane; for (unsigned int i = beg; i < end; i += 32) { unsigned int u = src[i]; unsigned int v = dest[i]; unsigned int w = weight[i]; if (is_distance_infinity_prev[u] == TRUE) { continue; } //printf("%u isn't infinite distance\n", u); if (distance_prev[u] + w < distance_prev[v]) { // relax //printf("%u %u\n", distance_cur[v], distance_prev[u] + w); unsigned int old_distance = atomicMin(&distance_cur[v], distance_prev[u] + w); atomicMin(&is_distance_infinity_cur[v], FALSE); //printf("%u %u %u %d\n", old_distance, distance_cur[v], distance_prev[u] + w); // test for a change! if (old_distance != distance_cur[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } } } /* Edge process in core */ __global__ void edge_process_in_core(unsigned int edges_length, unsigned int vertices_length, unsigned int *src, unsigned int *dest, unsigned int *weight, unsigned int *distance, int *noChange, int *is_distance_infinity) { unsigned int thread_id = blockDim.x * blockIdx.x + threadIdx.x; unsigned int thread_num = blockDim.x * gridDim.x; unsigned int warp_id = thread_id / 32; unsigned int warp_num = thread_num % 32 == 0 ? thread_num / 32 : thread_num / 32 + 1; unsigned int load = edges_length % warp_num == 0 ? 
edges_length / warp_num : edges_length / warp_num + 1; unsigned int beg = load * warp_id; unsigned int end = min(edges_length, beg + load); unsigned int lane = thread_id % 32; beg += lane; for (unsigned int i = beg; i < end; i += 32) { unsigned int u = src[i]; unsigned int v = dest[i]; unsigned int w = weight[i]; if (is_distance_infinity[u] == TRUE) { continue; } unsigned int temp_dist = distance[u] + w; if (distance[u] == -1) { continue; } if (temp_dist < distance[v]) { // relax //printf("%u %u\n", distance[v], temp_dist); int old_distance = atomicMin(&distance[v], temp_dist); atomicMin(&is_distance_infinity[v], FALSE); //printf("%u %u %u %d\n", old_distance, distance_cur[v], distance_prev[u] + w, is_distance_infinity[v]); // test for a change! if (old_distance != distance[v]) { //printf("there is change\n"); atomicMin(noChange, FALSE); } } } } void puller(int blockSize, int blockNum, int sync, int smem, unsigned int *distance_cur, unsigned int *edges_src, unsigned int *edges_dest, unsigned int *edges_weight, unsigned int edges_length, unsigned int vertices_length){ /* Will use these arrays instead of a vector * edges_src : array of all edges (indexed 0 to n) where the value is the vertex source index of the edge (since edges are directed) * edges_dest : same as above, except it tells the vertex destination index * edges_weight : same as above, except it tells the edge's weight * distance_prev : array of all vertices with their distance values * distance_cur : same as above */ /* Allocate here... 
*/ unsigned int *distance_prev = (unsigned int *) malloc(vertices_length * sizeof(unsigned int)); int *noChange = (int *) malloc(sizeof(int)); int *is_distance_infinity = (int *) malloc(vertices_length * sizeof(int)); *noChange = TRUE; unsigned int *cuda_edges_src, *cuda_edges_dest, *cuda_edges_weight; unsigned int *cuda_distance_prev, *cuda_distance_cur; int *cuda_noChange, *cuda_is_distance_infinity_prev, *cuda_is_distance_infinity_cur; // the distance to the first vertex is always 0 distance_prev[0] = 0; distance_cur[0] = 0; is_distance_infinity[0] = FALSE; // setting an unsigned int to -1 will set it to the maximum value! for (int i = 1; i < vertices_length; i++) { distance_prev[i] = -1; distance_cur[i] = -1; is_distance_infinity[i] = TRUE; } cudaMalloc((void **)&cuda_edges_src, edges_length * sizeof(unsigned int)); cudaMalloc((void **)&cuda_edges_dest, edges_length * sizeof(unsigned int)); cudaMalloc((void **)&cuda_edges_weight, edges_length * sizeof(unsigned int)); cudaMalloc((void **)&cuda_distance_prev, vertices_length * sizeof(unsigned int)); cudaMalloc((void **)&cuda_distance_cur, vertices_length * sizeof(unsigned int)); cudaMalloc((void **)&cuda_noChange, sizeof(int)); cudaMalloc((void **)&cuda_is_distance_infinity_prev, vertices_length * sizeof(int)); cudaMalloc((void **)&cuda_is_distance_infinity_cur, vertices_length * sizeof(int)); cudaMemcpy(cuda_edges_src, edges_src, edges_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_edges_dest, edges_dest, edges_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_edges_weight, edges_weight, edges_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_distance_prev, distance_prev, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_distance_cur, distance_cur, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_noChange, noChange, sizeof(int), cudaMemcpyHostToDevice); 
cudaMemcpy(cuda_is_distance_infinity_prev, is_distance_infinity, vertices_length * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_is_distance_infinity_cur, is_distance_infinity, vertices_length * sizeof(int), cudaMemcpyHostToDevice); setTime(); /* * Do all the things here! **/ // sync is out of core if (sync == 0) { // no shared memory if (smem == 0) { for (unsigned int i = 1; i < vertices_length; i++) { //printf("pass %u\n", i); edge_process_out_of_core_non_warp_partitioned<<<blockNum, blockSize>>>(edges_length, cuda_edges_src, cuda_edges_dest, cuda_edges_weight, cuda_distance_prev, cuda_distance_cur, cuda_noChange, cuda_is_distance_infinity_prev, cuda_is_distance_infinity_cur); cudaMemcpy(noChange, cuda_noChange, sizeof(int), cudaMemcpyDeviceToHost); if (*noChange == TRUE) break; *noChange = TRUE; cudaMemcpy(cuda_noChange, noChange, sizeof(int), cudaMemcpyHostToDevice); // get current distance and copy it to both cuda_distance_prev and cuda_distance_cur cudaMemcpy(distance_cur, cuda_distance_cur, vertices_length * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(cuda_distance_prev, distance_cur, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_distance_cur, distance_cur, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(is_distance_infinity, cuda_is_distance_infinity_cur, vertices_length * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(cuda_is_distance_infinity_prev, is_distance_infinity, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_is_distance_infinity_cur, is_distance_infinity, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); } } // shared memory else if (smem == 1) { for (unsigned int i = 1; i < vertices_length; i++) { //printf("pass %d\n", i); edge_process_out_of_core_shared_memory<<<blockNum, blockSize, blockSize * sizeof(unsigned int)>>>(edges_length, cuda_edges_src, cuda_edges_dest, cuda_edges_weight, cuda_distance_prev, 
cuda_distance_cur, cuda_noChange, cuda_is_distance_infinity_prev, cuda_is_distance_infinity_cur); cudaMemcpy(noChange, cuda_noChange, sizeof(int), cudaMemcpyDeviceToHost); if (*noChange == TRUE) break; *noChange = TRUE; cudaMemcpy(cuda_noChange, noChange, sizeof(int), cudaMemcpyHostToDevice); // get current distance and copy it to both cuda_distance_prev and cuda_distance_cur cudaMemcpy(distance_cur, cuda_distance_cur, vertices_length * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(cuda_distance_prev, distance_cur, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_distance_cur, distance_cur, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(is_distance_infinity, cuda_is_distance_infinity_cur, vertices_length * sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(cuda_is_distance_infinity_prev, is_distance_infinity, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_is_distance_infinity_cur, is_distance_infinity, vertices_length * sizeof(unsigned int), cudaMemcpyHostToDevice); } } // no shared memory else { printf("No shared memory\n"); exit(1); } } // sync is in core else if (sync == 1) { for (unsigned int i = 1; i < vertices_length; i++) { edge_process_in_core<<<blockNum, blockSize>>>(edges_length, vertices_length, cuda_edges_src, cuda_edges_dest, cuda_edges_weight, cuda_distance_cur, cuda_noChange, cuda_is_distance_infinity_prev); cudaMemcpy(noChange, cuda_noChange, sizeof(int), cudaMemcpyDeviceToHost); if (*noChange == TRUE) break; *noChange = TRUE; cudaMemcpy(cuda_noChange, noChange, sizeof(int), cudaMemcpyHostToDevice); } } else { // no syncing printf("No syncing tag\n"); exit(1); } cudaDeviceSynchronize(); std::cout << "Took " << getTime() << "ms.\n"; cudaMemcpy(distance_cur, cuda_distance_cur, vertices_length * sizeof(unsigned int), cudaMemcpyDeviceToHost); /* Deallocate. 
*/ cudaFree(cuda_edges_src); cudaFree(cuda_edges_dest); cudaFree(cuda_edges_weight); cudaFree(cuda_distance_prev); cudaFree(cuda_distance_cur); cudaFree(cuda_noChange); cudaFree(cuda_is_distance_infinity_prev); cudaFree(cuda_is_distance_infinity_cur); free(distance_prev); free(noChange); free(is_distance_infinity); }
74a847af817b5d03ad6bfb58dff44f5c069ed402.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <rocblas.h> #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" #include "col2im.h" #include "utils.h" #include "dark_cuda.h" #include "box.h" __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream() , x, n, binary); CHECK_CUDA(hipPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for(i = 0; i < n; ++i){ mean += fabs(input[i*size + s]); } mean = mean / n; for(i = 0; i < n; ++i){ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, get_cuda_stream() , input, n, size, binary); CHECK_CUDA(hipPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for (i = 0; i < size; ++i) { mean += fabs(weights[f*size + i]); } mean = mean / size; for (i = 0; i < size; ++i) { binary[f*size + i] = (weights[f*size + i] > 0) ? 
mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { binarize_weights_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(weights, n, size, binary); CHECK_CUDA(hipPeekAtLastError()); } __global__ void set_zero_kernel(float *src, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) src[i] = 0; } __inline__ __device__ float warpAllReduceSum(float val) { for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2) #if CUDART_VERSION >= 9000 val += __shfl_xor_sync(0xffffffff, val, mask); #else val += __shfl_xor(val, mask); #endif return val; } // only if (size % 32 == 0) __global__ void reduce_kernel(float *weights, int n, int size, float *mean_arr_gpu) { int i = blockIdx.x * blockDim.x + threadIdx.x; int f = i / size; if (f >= n) return; float warp_mean = warpAllReduceSum(fabs(weights[i])); if(i % 32 == 0) atomicAdd(&mean_arr_gpu[f], warp_mean / size); } __global__ void binarize_weights_mean_kernel(float *weights, int n, int size, float *binary, float *mean_arr_gpu) { int i = blockIdx.x * blockDim.x + threadIdx.x; int f = i / size; if (f >= n) return; float mean = mean_arr_gpu[f]; binary[i] = (weights[i] > 0) ? 
mean : -mean; } void fast_binarize_weights_gpu(float *weights, int n, int size, float *binary, float *mean_arr_gpu) { if (size % 32 == 0) { size_t gridsize = n * size; const int num_blocks = get_number_of_blocks(gridsize, BLOCK);// gridsize / BLOCK + 1; set_zero_kernel << <(n/BLOCK + 1), BLOCK, 0, get_cuda_stream() >> > (mean_arr_gpu, n); reduce_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (weights, n, size, mean_arr_gpu); binarize_weights_mean_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (weights, n, size, binary, mean_arr_gpu); CHECK_CUDA(hipPeekAtLastError()); } else { binarize_weights_gpu(weights, n, size, binary); } } __global__ void cuda_f32_to_f16(float* input_f32, size_t size, half *output_f16) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) output_f16[idx] = __float2half(input_f32[idx]); //if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting // __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn //if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]); } void cuda_convert_f32_to_f16(float* input_f32, size_t size, float *output_f16) { hipLaunchKernelGGL(( cuda_f32_to_f16) , dim3(get_number_of_blocks(size, BLOCK)), dim3(BLOCK), 0, get_cuda_stream() , input_f32, size, (half *)output_f16); CHECK_CUDA(hipPeekAtLastError()); } __global__ void cuda_f16_to_f32(half* input_f16, size_t size, float *output_f32) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) output_f32[idx] = __half2float(input_f16[idx]); //if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx)); } void cuda_convert_f16_to_f32(float* input_f16, size_t size, float *output_f32) { hipLaunchKernelGGL(( cuda_f16_to_f32) , dim3(get_number_of_blocks(size, BLOCK)), dim3(BLOCK), 0, get_cuda_stream() , (half *)input_f16, size, output_f32); CHECK_CUDA(hipPeekAtLastError()); } half *cuda_make_f16_from_f32_array(float 
*src, size_t n) { half *dst16; size_t size = sizeof(half)*n; CHECK_CUDA(hipMalloc((void **)&dst16, size)); if (src) { assert(n > 0); cuda_convert_f32_to_f16(src, n, (float *)dst16); } if (!dst16) error("Cuda malloc failed\n"); return dst16; } void forward_convolutional_layer_gpu(convolutional_layer l, network_state state) { //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); if(l.binary){ binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu); swap_binary(&l); } if(l.xnor){ if (!l.align_bit_weights_gpu || state.train) { //binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu); fast_binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu, l.mean_arr_gpu); } if (l.align_bit_weights_gpu && !state.train && l.c >= 32 && l.stride_x == l.stride_y) { //return; //hipError_t status = hipSuccess; //int input_size = l.c*l.h*l.w*l.batch; int m = l.n / l.groups; int k = l.size*l.size*l.c / l.groups; int n = l.out_w*l.out_h; //float * a = l.weights_gpu; // int i, j; // for(i = 0; i < l.batch; ++i){ // for (j = 0; j < l.groups; ++j) { int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; //size_t t_intput_size = new_ldb * n; //size_t t_bit_input_size = t_intput_size / 8;// +1; if (l.c % 32 == 0) { //printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - new XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad); //printf("l.align_workspace_size = %d, (l.c * l.w * l.h) = %d \n", l.align_workspace_size, (l.c * l.w * l.h)); //float *intput_cpu = (float *)calloc(l.inputs, sizeof(float)); // state.input //hipMemcpy(intput_cpu, state.input, l.inputs * sizeof(float), hipMemcpyDefault); int ldb_align = l.lda_align; size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; //size_t t_intput_size = new_ldb * l.bit_align;// n; //size_t t_bit_input_size = t_intput_size / 8;// +1; const int new_c = 
l.c / 32; //float *re_packed_input = (float *)calloc(l.c * l.w * l.h, sizeof(float)); //uint32_t *bin_re_packed_input = (uint32_t *)calloc(new_c * l.w * l.h + 1, sizeof(uint32_t)); // float32x4 by channel (as in cuDNN) //repack_input(intput_cpu, re_packed_input, l.w, l.h, l.c); // 32 x floats -> 1 x uint32_t //float_to_bit(re_packed_input, (uint8_t *)bin_re_packed_input, l.c * l.w * l.h); //hipDeviceSynchronize(); //start_timer(); repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c); //repack_input_gpu(state.input, state.workspace, l.w, l.h, l.c); // 32 x floats -> 1 x uint32_t //float_to_bit_gpu(state.workspace, (unsigned char *)l.align_workspace_gpu, l.c * l.w * l.h);// l.align_workspace_size); //hipDeviceSynchronize(); //stop_timer_and_show_name("repack_input_gpu + float_to_bit_gpu"); //free(re_packed_input); // slow - convolution the packed inputs and weights: float x 32 by channel (as in cuDNN) //convolution_repacked((uint32_t *)bin_re_packed_input, (uint32_t *)l.align_bit_weights, l.output, // l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr); // // then exit from if() //float *b = state.workspace; //float *b = (float *)calloc(100 * 1024 * 1024, sizeof(float)); //float *c = l.output; //memset(c, 0, l.outputs * sizeof(float)); //im2col_cpu_custom((float *)bin_re_packed_input, new_c, l.h, l.w, l.size, l.stride, l.pad, b); //hipMemcpy(l.align_workspace_gpu, bin_re_packed_input, (new_c * l.w * l.h + 1) * sizeof(uint32_t), hipMemcpyDefault); //start_timer(); im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace); //hipDeviceSynchronize(); //stop_timer_and_show_name("im2col_ongpu"); //free(bin_re_packed_input); int new_k = l.size*l.size*l.c / 32; // good for (l.c == 64) //gemm_nn_bin_32bit_packed(m, n, new_k, 1, // l.align_bit_weights, l.new_lda/32, // b, n, // c, n, l.mean_arr); // // then exit from if() //size_t new_ldb = k + (ldb_align - k%ldb_align); // (k / 8 + 1) * 8; //size_t 
t_intput_size = new_ldb * l.bit_align;// n; //size_t t_bit_input_size = t_intput_size / 8;// +1; //char *t_bit_input = (char *)calloc(t_bit_input_size, sizeof(char)); //transpose_uint32((uint32_t *)b, (uint32_t *)t_bit_input, new_k, n, n, new_ldb); //hipMemcpy(l.transposed_align_workspace_gpu, t_bit_input, t_bit_input_size * sizeof(char), hipMemcpyDefault); //hipMemcpy(state.workspace, b, t_bit_input_size * sizeof(char), hipMemcpyDefault); //printf("\n n = %d, n % 32 = %d, new_ldb = %d, new_ldb % 32 = %d \n", n, n % 32, new_ldb, new_ldb % 32); //start_timer(); transpose_uint32_gpu((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb); //hipDeviceSynchronize(); //stop_timer_and_show_name("transpose_uint32_gpu"); //hipDeviceSynchronize(); //stop_timer_and_show_name("repack_input_gpu_bin + im2col_ongpu + transpose_uint32_gpu_2"); //start_timer(); gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY, l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu); //hipDeviceSynchronize(); //stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu"); // the main GEMM function //gemm_nn_custom_bin_mean_transposed(m, n, k, 1, (uint8_t *)l.align_bit_weights, new_ldb, (uint8_t *)t_bit_input, new_ldb, c, n, l.mean_arr); //add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w); //hipMemcpy(l.output_gpu, l.output, l.outputs * sizeof(float), hipMemcpyDefault); // // alternative GEMM //gemm_nn_bin_transposed_32bit_packed(m, n, new_k, 1, // l.align_bit_weights, l.new_lda/32, // t_bit_input, new_ldb / 32, // c, n, l.mean_arr); //free(t_bit_input); //free(b); } else { //printf("\n\n l.index = %d, l.w = %d, l.c = %d, l.n = %d, l.stride = %d, l.pad = %d - old XNOR \n", l.index, l.w, l.c, l.n, l.stride, l.pad); //hipDeviceSynchronize(); int i = 0; /* // if 
(l.stride == 1 && l.c >= 256 && l.size > 1) if (l.stride == 1 && l.c >= 1024 && l.size > 1 && 0)// && l.w >= 13) // disabled { // stride=1 only //start_timer(); im2col_align_bin_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, state.workspace, l.bit_align); //hipDeviceSynchronize(); //stop_timer_and_show_name("im2col_align_bin_ongpu"); } else*/ { //start_timer(); im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align); //hipDeviceSynchronize(); //stop_timer_and_show_name("im2col_align_ongpu"); //getchar(); // should be optimized //start_timer(); float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size); //hipDeviceSynchronize(); //stop_timer_and_show_name("float_to_bit_gpu"); } //start_timer(); transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8); //hipDeviceSynchronize(); //stop_timer_and_show_name("transpose_bin_gpu"); //hipDeviceSynchronize(); //stop_timer_and_show_name("im2col_align_ongpu + float_to_bit_gpu + transpose_bin_gpu"); // should be optimized //if(0) {//if (k > 1000) { // sequentially input-shared - BAD // gemm_nn_custom_bin_mean_transposed_sequentially_gpu(m, n, k, // (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu); //} //else { // coalescing & weights-shared-memory - GOOD //start_timer(); gemm_nn_custom_bin_mean_transposed_gpu(m, n, k, (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu, new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY, l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu); //hipDeviceSynchronize(); //stop_timer_and_show_name("gemm_nn_custom_bin_mean_transposed_gpu"); //} //hipDeviceSynchronize(); //check_error(status); //getchar(); } /* { 
float_to_bit_gpu(state.input, (unsigned char *)l.align_workspace_gpu, input_size); convolve_bin_gpu(l.align_workspace_gpu, (float *)l.align_bit_weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad, l.new_lda, l.mean_arr_gpu); //convolve_gpu(state.input, l.weights_gpu, l.output_gpu, l.w, l.h, l.c, l.n, l.size, l.pad); //hipDeviceSynchronize(); //check_error(status); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } */ //add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu); else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu); else if (l.activation == NORM_CHAN) activate_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu); else if (l.activation == NORM_CHAN_SOFTMAX) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu); else if (l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); //if(l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); //if (l.binary || l.xnor) swap_binary(&l); //hipDeviceSynchronize(); return; } } if (l.xnor) { swap_binary(&l); binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu); state.input = l.binary_input_gpu; } //fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); #ifdef CUDNN //float one = 1; // alpha[0], beta[0] is float for HALF and FLOAT float alpha = 1, beta = 0; //#ifdef CUDNN_HALF //if (state.use_mixed_precision) { int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions); if (state.index != 0 && state.net.cudnn_half && !l.xnor && (!state.train || iteration_num > 3*state.net.burn_in) && 
(l.c / l.groups) % 8 == 0 && l.n % 8 == 0 && !state.train) { //printf("\n CUDNN_HALF!!! state.index = %d \n", state.index); // Note: For improved performance it is advised to use beta[0] = 0.0. // For Tensor Core: cudnnSetConvolutionMathType() where cudnnMathType_t mathType = CUDNN_TENSOR_OP_MATH; // 1. or CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM and use CUDNN_DATA_HALF // 2. or CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED // More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops const size_t input16_size = l.batch*l.c*l.w*l.h; const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w; if (*state.net.max_input16_size < input16_size) { //printf("\n input16_size: cur = %zu \t max = %zu \n", input16_size, *state.net.max_input16_size); *state.net.max_input16_size = input16_size; if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu); assert(*state.net.max_input16_size > 0); *state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size); } float *input16 = *state.net.input16_gpu; if (*state.net.max_output16_size < output16_size) { *state.net.max_output16_size = output16_size; if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu); assert(*state.net.max_output16_size > 0); *state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size); } float *output16 = *state.net.output16_gpu; assert(input16_size > 0); cuda_convert_f32_to_f16(state.input, input16_size, input16); //fill_ongpu(output16_size / 2, 0, (float *)output16, 1); CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(), &alpha, l.srcTensorDesc16, input16, l.weightDesc16, l.weights_gpu16, l.convDesc, l.fw_algo16, state.workspace, l.workspace_size, &beta, l.dstTensorDesc16, output16)); if (l.batch_normalize) { if (state.train) // Training { simple_copy_ongpu(l.outputs*l.batch / 2, output16, l.x_gpu); //copy_ongpu(l.outputs*l.batch / 2, output16, 1, l.x_gpu, 1); //hipMemcpyAsync(l.x_gpu, 
output16, l.outputs*l.batch*sizeof(half), hipMemcpyDefault, get_cuda_stream()); float one = 1.0f; float zero = 0.0f; // Batch-normalization can still take FP16 inputs and outputs, saving half the bandwidth // compared to FP32, it's just that the statistics and value adjustment should be done in FP32. CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, &one, &zero, l.normDstTensorDescF16, l.x_gpu, // input l.normDstTensorDescF16, output16, // output l.normTensorDesc, l.scales_gpu, // input l.biases_gpu, // input .01, l.rolling_mean_gpu, // input/output (should be FP32) l.rolling_variance_gpu, // input/output (should be FP32) .00001, l.mean_gpu, // output (should be FP32) - optional cache to speedup cudnnBatchNormalizationBackward() l.variance_gpu)); // output (should be FP32) - optional cache to speedup cudnnBatchNormalizationBackward() cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); //forward_batchnorm_layer_gpu(l, state); } else // Detection { cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w); scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h); } } else // BIAS only { cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu); add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } } else { //#else /* int input_nan_inf = is_nan_or_inf(state.input, l.inputs * l.batch); printf("\n is_nan_or_inf(state.input) = %d \n", input_nan_inf); if (input_nan_inf) getchar(); int weights_nan_inf = is_nan_or_inf(l.weights_gpu, l.nweights); printf("\n is_nan_or_inf(l.weights_gpu) = %d \n", weights_nan_inf); if (weights_nan_inf) getchar(); */ CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(), &alpha, //&one, l.srcTensorDesc, state.input, l.weightDesc, l.weights_gpu, l.convDesc, 
l.fw_algo, state.workspace, l.workspace_size, &beta, //&one, l.dstTensorDesc, l.output_gpu)); //hipDeviceSynchronize(); if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, state); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } //#endif // CUDNN_HALF } #else fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1); int i, j; int m = l.n / l.groups; int k = l.size*l.size*l.c / l.groups; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ for (j = 0; j < l.groups; ++j) { //float *im = state.input + i*l.c*l.h*l.w; float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w; float *a = l.weights_gpu + j*l.nweights / l.groups; float *b = state.workspace; float *c = l.output_gpu + (i*l.groups + j)*n*m; if (l.size == 1) { b = im; } else { //im2col_ongpu(im, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, state.workspace); im2col_gpu_ext(im, // input l.c / l.groups, // input channels l.h, l.w, // input size (h, w) l.size, l.size, // kernel size (h, w) l.pad, l.pad, // padding (h, w) l.stride_y, l.stride_x, // stride (h, w) l.dilation, l.dilation, // dilation (h, w) state.workspace); // output } //gemm_ongpu(0, 0, m, n, k, 1., a, k, b, n, 1., c + i*m*n, n); gemm_ongpu(0, 0, m, n, k, 1, a, k, b, n, 1, c, n); } } if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, state); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } #endif //#ifndef CUDNN_HALF //#endif // no CUDNN_HALF if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu); else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu); else if (l.activation == NORM_CHAN) activate_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu); else if (l.activation == NORM_CHAN_SOFTMAX) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, 
l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu); else if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); if(l.binary || l.xnor) swap_binary(&l); //hipDeviceSynchronize(); // for correct profiling of performance if (state.net.try_fix_nan) { fix_nan_and_inf(l.output_gpu, l.outputs*l.batch); } if(l.assisted_excitation && state.train) assisted_excitation_forward_gpu(l, state); if (l.antialiasing) { network_state s = { 0 }; s.train = state.train; s.workspace = state.workspace; s.net = state.net; if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() ) s.input = l.output_gpu; forward_convolutional_layer_gpu(*(l.input_layer), s); simple_copy_ongpu(l.outputs*l.batch, l.output_gpu, l.input_antialiasing_gpu); simple_copy_ongpu(l.input_layer->outputs*l.input_layer->batch, l.input_layer->output_gpu, l.output_gpu); } } void backward_convolutional_layer_gpu(convolutional_layer l, network_state state) { if (l.antialiasing) { network_state s = { 0 }; s.train = state.train; s.workspace = state.workspace; s.net = state.net; s.delta = l.delta_gpu; // s.delta will be returned to l.delta_gpu s.input = l.input_antialiasing_gpu; //if (!state.train) s.index = state.index; // don't use TC for training (especially without cuda_convert_f32_to_f16() ) simple_copy_ongpu(l.input_layer->outputs*l.input_layer->batch, l.delta_gpu, l.input_layer->delta_gpu); backward_convolutional_layer_gpu(*(l.input_layer), s); simple_copy_ongpu(l.outputs*l.batch, l.input_antialiasing_gpu, l.output_gpu); } if(state.net.try_fix_nan) constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1); if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu); else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu); else if (l.activation == 
NORM_CHAN_SOFTMAX) gradient_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.delta_gpu); else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if (!l.batch_normalize) backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); //#ifndef CUDNN_HALF //if(l.batch_normalize){ // backward_batchnorm_layer_gpu(l, state); //} else { // //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); //} //#endif // no CUDNN_HALF float *original_input = state.input; if(l.xnor) state.input = l.binary_input_gpu; #ifdef CUDNN float one = 1.f; float alpha = 1, beta = 0; //#ifdef CUDNN_HALF int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions); if (state.index != 0 && state.net.cudnn_half && !l.xnor && (!state.train || iteration_num > 3*state.net.burn_in) && (l.c / l.groups) % 8 == 0 && l.n % 8 == 0 && !state.train) { const size_t input16_size = l.batch*l.c*l.w*l.h; const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h; if (*state.net.max_input16_size < input16_size) { *state.net.max_input16_size = input16_size; if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu); assert(*state.net.max_input16_size > 0); *state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size); } float *input16 = *state.net.input16_gpu; if (*state.net.max_output16_size < delta16_size) { *state.net.max_output16_size = delta16_size; if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu); assert(*state.net.max_output16_size > 0); *state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size); } float *delta16 = *state.net.output16_gpu; assert(input16_size > 0); assert(delta16_size > 0); cuda_convert_f32_to_f16(state.input, input16_size, input16); cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16); if (l.batch_normalize) { //if (!state.train) { // 
l.mean_gpu = l.rolling_mean_gpu; // l.variance_gpu = l.rolling_variance_gpu; //} float one = 1.0f; float zero = 0.0f; CHECK_CUDNN(cudnnBatchNormalizationBackward(cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, &one, &zero, &one, &one, l.normDstTensorDescF16, l.x_gpu, // input (input in BN-forward-inference) l.normDstTensorDescF16, delta16, // input l.normDstTensorDescF16, l.x_norm_gpu, // output (new delta) l.normTensorDesc, l.scales_gpu, // input (should be FP32) l.scale_updates_gpu, // output (should be FP32) l.bias_updates_gpu, // output (should be FP32) .00001, l.mean_gpu, // input (should be FP32) l.variance_gpu)); // input (should be FP32) simple_copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, delta16); //copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1); //hipMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), hipMemcpyDefault, get_cuda_stream()); } else { //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } // convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16 // get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16) // calculate conv weight updates // Already: l.weight_updates_gpu = (l.weight_updates_gpu - l.weight*decay*batch*subdivision)*momentum // so we should copy f32 to f16, or compute: f16=(w_up - w*d*b*s)*m assert((l.nweights) > 0); cuda_convert_f32_to_f16(l.weight_updates_gpu, l.nweights, l.weight_updates_gpu16); CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc16, input16, //state.input, l.ddstTensorDesc16, delta16, //l.delta_gpu, l.convDesc, l.bf_algo16, state.workspace, l.workspace_size, &one, l.dweightDesc16, l.weight_updates_gpu16)); // l.weight_updates_gpu); cuda_convert_f16_to_f32(l.weight_updates_gpu16, l.nweights, l.weight_updates_gpu); if (state.delta) { if (l.binary || l.xnor) swap_binary(&l); // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData // calculate 
delta for the next layer // convert input: l.weights_gpu (w), l.delta_gpu (dy) from fp32 to fp16 // get output: state.delta (dx) and convert it to fp32 (ONLY if it is fp16) CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(), &alpha, l.weightDesc16, l.weights_gpu16, //l.weights_gpu, l.ddstTensorDesc16, delta16, //l.delta_gpu, l.convDesc, l.bd_algo16, state.workspace, l.workspace_size, &beta, l.dsrcTensorDesc16, input16)); // state.delta); cuda_convert_f16_to_f32(input16, input16_size, state.delta); if (l.binary || l.xnor) swap_binary(&l); if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta); } } else { //#else // CUDNN_HALF if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, state); } // calculate conv weight updates // if used: beta=1 then loss decreases faster CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, state.input, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, state.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu)); if (state.delta) { if (l.binary || l.xnor) swap_binary(&l); // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData // calculate delta for the next layer CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, state.workspace, l.workspace_size, &one, l.dsrcTensorDesc, state.delta)); if (l.binary || l.xnor) swap_binary(&l); if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta); } } //#endif // CUDNN_HALF #else // CUDNN if (l.batch_normalize) { backward_batchnorm_layer_gpu(l, state); } int m = l.n / l.groups; int n = l.size*l.size*l.c / l.groups; int k = l.out_w*l.out_h; int i, j; for(i = 0; i < l.batch; ++i){ for (j = 0; j < l.groups; ++j) { float * a = l.delta_gpu + (i*l.groups + j)*m*k; float * b = state.workspace; float * c = l.weight_updates_gpu + j*l.nweights 
/ l.groups; float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w; //im2col_ongpu(im, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, state.workspace); im2col_gpu_ext(im, // input l.c / l.groups, // input channels l.h, l.w, // input size (h, w) l.size, l.size, // kernel size (h, w) l.pad, l.pad, // padding (h, w) l.stride_y, l.stride_x, // stride (h, w) l.dilation, l.dilation, // dilation (h, w) state.workspace); // output //gemm_ongpu(0, 1, m, n, k, 1, a + i*m*k, k, b, k, 1, c, n); gemm_ongpu(0, 1, m, n, k, 1, a, k, b, k, 1, c, n); if (state.delta) { if (l.binary || l.xnor) swap_binary(&l); float * a = l.weights_gpu + j*l.nweights / l.groups; float * b = l.delta_gpu + (i*l.groups + j)*m*k; float * c = state.workspace; //gemm_ongpu(1, 0, n, k, m, 1, a, n, b + i*k*m, k, 0, c, k); gemm_ongpu(1, 0, n, k, m, 1, a, n, b, k, 0, c, k); float *delta = state.delta + (i*l.groups + j)*l.c / l.groups*l.h*l.w; //col2im_ongpu(state.workspace, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, delta); col2im_gpu_ext( state.workspace, // input l.c / l.groups, // input channels l.h, l.w, // input size (h, w) l.size, l.size, // kernel size (h, w) l.pad, l.pad, // padding size (h, w) l.stride_y, l.stride_x, // stride size (h, w) l.dilation, l.dilation, // dilation size (h, w) delta); // output (delta) if (l.binary || l.xnor) { swap_binary(&l); } if (l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w); } } } #endif if (state.net.try_fix_nan) { if (state.delta) { fix_nan_and_inf(state.delta, l.inputs * l.batch); } int size = l.nweights; fix_nan_and_inf(l.weight_updates_gpu, size); fix_nan_and_inf(l.weights_gpu, size); } } __global__ void calc_avg_activation_kernel(float *src, float *dst, int size, int channels, int batches) { int i = blockIdx.x * blockDim.x + threadIdx.x; int xy = i % size; int b = i / size; if (i < size*batches) { dst[i] = 0; for (int c = 0; c < channels; ++c) { dst[i] += src[xy + size*(c + 
channels*b)]; } dst[i] = dst[i] / channels; } } void calc_avg_activation_gpu(float *src, float *dst, int size, int channels, int batches) { const int num_blocks = get_number_of_blocks(size*batches, BLOCK); calc_avg_activation_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (src, dst, size, channels, batches); } __global__ void assisted_activation_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches) { int i = blockIdx.x * blockDim.x + threadIdx.x; int xy = i % size; int b = i / size; if (b < batches) { for (int c = 0; c < channels; ++c) { output[xy + size*(c + channels*b)] += alpha * gt_gpu[i] * a_avg_gpu[i]; //output[xy + size*(c + channels*b)] += gt_gpu[i] * a_avg_gpu[i]; //output[xy + size*(c + channels*b)] += gt_gpu[i] * output[xy + size*(c + channels*b)]; //output[xy + size*(c + channels*b)] = a_avg_gpu[i]; } } } void assisted_activation_gpu(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches) { const int num_blocks = get_number_of_blocks(size*batches, BLOCK); assisted_activation_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (alpha, output, gt_gpu, a_avg_gpu, size, channels, batches); } __global__ void assisted_activation2_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches) { int i = blockIdx.x * blockDim.x + threadIdx.x; int xy = i % size; int b = i / size; float beta = 1 - alpha; if (b < batches) { for (int c = 0; c < channels; ++c) { if(gt_gpu[i] == 0) output[xy + size*(c + channels*b)] *= beta; } } } void assisted_activation2_gpu(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches) { const int num_blocks = get_number_of_blocks(size*batches, BLOCK); assisted_activation2_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (alpha, output, gt_gpu, a_avg_gpu, size, channels, batches); } void assisted_excitation_forward_gpu(convolutional_layer l, 
network_state state)
{
    /* Current training iteration (number of weight updates so far). */
    const int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions);

    /* Cosine schedule: alpha decays from 1 to 0 over max_batches iterations. */
    float alpha = (1 + cos(3.141592 * iteration_num / state.net.max_batches)) / 2;

    if (l.assisted_excitation == 1) {
        /* Default mode: active only for the first half of training. */
        if (iteration_num > state.net.max_batches / 2) return;
    }
    else {
        /* Explicit mode: active between burn_in and l.assisted_excitation iterations,
           with alpha decaying from 1 to 0 over (burn_in + assisted_excitation). */
        if (iteration_num < state.net.burn_in) return;
        else if (iteration_num > l.assisted_excitation) return;
        else alpha = (1 + cos(3.141592 * iteration_num / (state.net.burn_in + l.assisted_excitation))) / 2;
    }

    /* Host-side scratch: per-pixel ground-truth mask (a_avg kept for parity with
       the CPU reference implementation; the GPU path uses l.a_avg_gpu instead). */
    float *a_avg = (float *)calloc(l.out_w * l.out_h * l.batch, sizeof(float));
    float *gt = (float *)calloc(l.out_w * l.out_h * l.batch, sizeof(float));

    int b;
    int w, h;

    l.max_boxes = state.net.num_boxes;
    l.truths = l.max_boxes*(4 + 1);   /* 4 box coords + 1 class id per truth entry */

    int num_truth = l.batch*l.truths;
    float *truth_cpu = (float *)calloc(num_truth, sizeof(float));
    cuda_pull_array(state.truth, truth_cpu, num_truth);

    /* Build the binary ground-truth mask: 1 inside every truth box, 0 elsewhere. */
    for (b = 0; b < l.batch; ++b)
    {
        int t;
        for (t = 0; t < state.net.num_boxes; ++t)
        {
            box truth = float_to_box_stride(truth_cpu + t*(4 + 1) + b*l.truths, 1);
            if (!truth.x) break;   /* no more boxes for this image */

            float beta = 0;        /* optional box-inflation factor (disabled) */
            float dw = (1 - truth.w) * beta;
            float dh = (1 - truth.h) * beta;

            /* Box extent in feature-map cells, clamped to [0, out_w] x [0, out_h]. */
            int left = floor((truth.x - (dw + truth.w) / 2) * l.out_w);
            int right = ceil((truth.x + (dw + truth.w) / 2) * l.out_w);
            int top = floor((truth.y - (dh + truth.h) / 2) * l.out_h);
            int bottom = ceil((truth.y + (dh + truth.h) / 2) * l.out_h);
            if (left < 0) left = 0;
            if (top < 0) top = 0;
            if (right > l.out_w) right = l.out_w;
            if (bottom > l.out_h) bottom = l.out_h;

            /* BUGFIX: use an exclusive upper bound on BOTH axes. The original used
               `w <= right`, which writes one column past the mask (heap overflow
               in gt[]) whenever a box touches the right border (right == l.out_w).
               The h loop already used the exclusive form. */
            for (w = left; w < right; w++) {
                for (h = top; h < bottom; h++) {
                    gt[w + l.out_w * h + l.out_w*l.out_h*b] = 1;
                }
            }
        }
    }

    cuda_push_array(l.gt_gpu, gt, l.out_w * l.out_h * l.batch);

    /* Average activation per spatial cell, computed on the GPU for the whole batch. */
    calc_avg_activation_gpu(l.output_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch);

    /* output += alpha * mask * avg_activation (assisted excitation). */
    assisted_activation_gpu(alpha, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch);

    free(truth_cpu);
    free(gt);
    free(a_avg);
}

/* Copy this layer's GPU-side parameters (and optimizer state) back to host
   memory. Transfers are queued asynchronously, then the stream is synchronized. */
void pull_convolutional_layer(convolutional_layer l)
{
    cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
    cuda_pull_array_async(l.biases_gpu, l.biases, l.n);
    cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights);
    cuda_pull_array_async(l.bias_updates_gpu, l.bias_updates, l.n);
    if (l.batch_normalize){
        cuda_pull_array_async(l.scales_gpu, l.scales, l.n);
        cuda_pull_array_async(l.rolling_mean_gpu, l.rolling_mean, l.n);
        cuda_pull_array_async(l.rolling_variance_gpu, l.rolling_variance, l.n);
    }
    if (l.adam){
        cuda_pull_array_async(l.m_gpu, l.m, l.nweights);
        cuda_pull_array_async(l.v_gpu, l.v, l.nweights);
    }
    CHECK_CUDA(hipPeekAtLastError());
    hipStreamSynchronize(get_cuda_stream());
}

/* Copy this layer's host-side parameters to the GPU. */
void push_convolutional_layer(convolutional_layer l) {
    cuda_push_array(l.weights_gpu, l.weights, l.nweights);
#ifdef CUDNN_HALF
    /* Keep the FP16 mirror of the weights in sync with the FP32 master copy. */
    assert(l.nweights > 0);
    cuda_convert_f32_to_f16(l.weights_gpu, l.nweights, l.weights_gpu16);
#endif
    cuda_push_array(l.biases_gpu, l.biases, l.n);
    if (l.train) {
        /* Gradient accumulators are only needed while training. */
        cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
        cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
    }
    if (l.batch_normalize){
        cuda_push_array(l.scales_gpu, l.scales, l.n);
        cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
        cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
    }
    if (l.adam){
        /* Adam first/second moment estimates. */
        cuda_push_array(l.m_gpu, l.m, l.nweights);
        cuda_push_array(l.v_gpu, l.v, l.nweights);
    }
    CHECK_CUDA(hipPeekAtLastError());
}

/* Apply one optimizer step (Adam, or SGD with momentum and weight decay) to this
   layer's weights, biases and batch-norm scales on the GPU.
   batch scales the accumulated gradients; decay/momentum are the usual SGD terms. */
void update_convolutional_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
    float learning_rate = learning_rate_init*l.learning_rate_scale;
    //float momentum = a.momentum;
    //float decay = a.decay;
    //int batch = a.batch;

    /* Sanitize gradients and weights before applying the update. */
    fix_nan_and_inf(l.weight_updates_gpu, l.nweights);
    fix_nan_and_inf(l.weights_gpu, l.nweights);

    if (l.adam) {
        adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.nweights, batch, l.t);

        adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.n, batch, l.t);
        if (l.scales_gpu) {
            adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.n, batch, l.t);
        }
    }
    else {
        /* Order matters:
           wu = wu - w*decay*batch;  w = w + wu*lr/batch;  wu = wu*momentum */
        axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
        axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
        scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);

        axpy_ongpu(l.n, learning_rate / batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
        scal_ongpu(l.n, momentum, l.bias_updates_gpu, 1);

        if (l.scales_gpu) {
            axpy_ongpu(l.n, learning_rate / batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
            scal_ongpu(l.n, momentum, l.scale_updates_gpu, 1);
        }
    }
    //if (l.clip) {
    //    constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1);
    //}
}

/* NOTE: a legacy (pre-groups) version of update_convolutional_layer_gpu, together
   with a long algebraic derivation of the decay/momentum update ordering, used to
   live here as a large commented-out block; it has been condensed to this note. */
74a847af817b5d03ad6bfb58dff44f5c069ed402.cu
#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>

#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "dark_cuda.h"
#include "box.h"

/* Sign binarization: binary[i] = +1 if x[i] >= 0, else -1. */
__global__ void binarize_kernel(float *x, int n, float *binary)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= n) return;
    binary[i] = (x[i] >= 0) ? 1 : -1;
}

void binarize_gpu(float *x, int n, float *binary)
{
    binarize_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >>>(x, n, binary);
    CHECK_CUDA(cudaPeekAtLastError());
}

/* Binarize the input per spatial position s: sign(input) scaled by the mean
   absolute value over the n channel entries at that position. */
__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
{
    int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (s >= size) return;
    int i = 0;
    float mean = 0;
    for(i = 0; i < n; ++i){
        mean += fabs(input[i*size + s]);
    }
    mean = mean / n;
    for(i = 0; i < n; ++i){
        binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
    }
}

void binarize_input_gpu(float *input, int n, int size, float *binary)
{
    binarize_input_kernel<<<cuda_gridsize(size), BLOCK, 0, get_cuda_stream() >>>(input, n, size, binary);
    CHECK_CUDA(cudaPeekAtLastError());
}

/* Binarize weights per filter f: sign(weight) scaled by the filter's mean
   absolute value (XNOR-Net style). One thread per filter. */
__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
{
    int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (f >= n) return;
    int i = 0;
    float mean = 0;
    for (i = 0; i < size; ++i) {
        mean += fabs(weights[f*size + i]);
    }
    mean = mean / size;
    for (i = 0; i < size; ++i) {
        binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
    }
}

void binarize_weights_gpu(float *weights, int n, int size, float *binary)
{
    binarize_weights_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(weights, n, size, binary);
    CHECK_CUDA(cudaPeekAtLastError());
}

__global__ void set_zero_kernel(float *src, int size)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) src[i] = 0;
}

/* Butterfly warp reduction: every lane ends up with the warp-wide sum. */
__inline__ __device__
float warpAllReduceSum(float val) {
    for (int mask = WARP_SIZE / 2; mask > 0; mask /= 2)
#if CUDART_VERSION >= 9000
        val += __shfl_xor_sync(0xffffffff, val, mask);
#else
        val += __shfl_xor(val, mask);
#endif
    return val;
}

// only if (size % 32 == 0)
/* Accumulate per-filter mean(|w|) into mean_arr_gpu: each warp reduces its 32
   weights, then lane 0 atomically adds the partial mean. Requires size % 32 == 0
   so that every warp belongs to exactly one filter f. */
__global__ void reduce_kernel(float *weights, int n, int size, float *mean_arr_gpu)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int f = i / size;
    if (f >= n) return;
    float warp_mean = warpAllReduceSum(fabs(weights[i]));
    if(i % 32 == 0)
        atomicAdd(&mean_arr_gpu[f], warp_mean / size);
}

/* Write sign(w) scaled by the precomputed per-filter mean (from reduce_kernel). */
__global__ void binarize_weights_mean_kernel(float *weights, int n, int size, float *binary, float *mean_arr_gpu)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int f = i / size;
    if (f >= n) return;
    float mean = mean_arr_gpu[f];
    binary[i] = (weights[i] > 0) ? mean : -mean;
}

/* Fast path (one thread per weight + warp reductions) when size % 32 == 0;
   otherwise falls back to the one-thread-per-filter kernel above. */
void fast_binarize_weights_gpu(float *weights, int n, int size, float *binary, float *mean_arr_gpu)
{
    if (size % 32 == 0) {
        size_t gridsize = n * size;
        const int num_blocks = get_number_of_blocks(gridsize, BLOCK);// gridsize / BLOCK + 1;

        set_zero_kernel << <(n/BLOCK + 1), BLOCK, 0, get_cuda_stream() >> > (mean_arr_gpu, n);
        reduce_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (weights, n, size, mean_arr_gpu);
        binarize_weights_mean_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (weights, n, size, binary, mean_arr_gpu);
        CHECK_CUDA(cudaPeekAtLastError());
    }
    else {
        binarize_weights_gpu(weights, n, size, binary);
    }
}

/* Elementwise FP32 -> FP16 conversion. */
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half *output_f16)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
    // __float2half_rn can't be compiled on Linux without casting; plain
    // __float2half is used instead (__float2half_ru/_rd/_rz/_rn are the variants).
}

/* output_f16 is declared float* at the API boundary but actually holds half data. */
void cuda_convert_f32_to_f16(float* input_f32, size_t size, float *output_f16) {
    cuda_f32_to_f16 <<< get_number_of_blocks(size, BLOCK), BLOCK, 0, get_cuda_stream() >>> (input_f32, size, (half *)output_f16);
    CHECK_CUDA(cudaPeekAtLastError());
}

/* Elementwise FP16 -> FP32 conversion. */
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float *output_f32)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
}

/* input_f16 is declared float* at the API boundary but actually holds half data. */
void cuda_convert_f16_to_f32(float* input_f16, size_t size, float *output_f32) {
    cuda_f16_to_f32 <<< get_number_of_blocks(size, BLOCK), BLOCK, 0, get_cuda_stream() >>> ((half *)input_f16, size, output_f32);
    CHECK_CUDA(cudaPeekAtLastError());
}

/* Allocate a device FP16 array of n elements; if src is non-NULL, fill it by
   converting the FP32 source data. */
half *cuda_make_f16_from_f32_array(float *src, size_t n)
{
    half *dst16;
    size_t size =
sizeof(half)*n;
    CHECK_CUDA(cudaMalloc((void **)&dst16, size));
    if (src) {
        assert(n > 0);
        cuda_convert_f32_to_f16(src, n, (float *)dst16);
    }
    if (!dst16) error("Cuda malloc failed\n");
    return dst16;
}

/* Forward pass of a convolutional layer on the GPU.
   Dispatches between several code paths:
   - XNOR bit-packed convolution (inference only, pre-packed bit weights),
   - cuDNN FP16 (Tensor Core) inference,
   - cuDNN FP32,
   - plain im2col + GEMM fallback when cuDNN is not compiled in. */
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
    if(l.binary){
        binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu);
        swap_binary(&l);
    }

    if(l.xnor){
        /* Refresh binarized weights unless pre-packed bit weights exist for inference. */
        if (!l.align_bit_weights_gpu || state.train)
        {
            fast_binarize_weights_gpu(l.weights_gpu, l.n, (l.c / l.groups)*l.size*l.size, l.binary_weights_gpu, l.mean_arr_gpu);
        }

        /* Fast bit-packed inference path; requires symmetric stride. */
        if (l.align_bit_weights_gpu && !state.train && l.c >= 32 && l.stride_x == l.stride_y)
        {
            int m = l.n / l.groups;
            int k = l.size*l.size*l.c / l.groups;
            int n = l.out_w*l.out_h;

            int ldb_align = l.lda_align;
            size_t new_ldb = k + (ldb_align - k%ldb_align); /* leading dim rounded up for alignment */

            if (l.c % 32 == 0)
            {
                /* "New" XNOR path: repack 32 input channels into one uint32_t,
                   im2col on the packed data, then a transposed bit-GEMM. */
                int ldb_align = l.lda_align;
                size_t new_ldb = k + (ldb_align - k%ldb_align);

                const int new_c = l.c / 32;

                repack_input_gpu_bin(state.input, (uint32_t *)l.align_workspace_gpu, l.w, l.h, l.c);

                im2col_ongpu(l.align_workspace_gpu, new_c, l.h, l.w, l.size, l.stride, l.pad, state.workspace);

                int new_k = l.size*l.size*l.c / 32;

                transpose_uint32_gpu((uint32_t *)state.workspace, (uint32_t *)l.transposed_align_workspace_gpu, new_k, n, n, new_ldb);

                /* Bit-packed GEMM; bias and LEAKY activation are fused in
                   (l.activation == LEAKY is passed as a flag). */
                gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
                    (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
                    new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
                    l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
            }
            else
            {
                /* "Old" XNOR path: float im2col, then bit-pack and transpose. */
                int i = 0;
                {
                    im2col_align_ongpu(state.input + i*l.c*l.h*l.w, l.c, l.h, l.w, l.size, l.stride, l.pad, l.align_workspace_gpu, l.bit_align);

                    float_to_bit_gpu(l.align_workspace_gpu, (unsigned char *)state.workspace, l.align_workspace_size);
                }

                transpose_bin_gpu((unsigned char *)state.workspace, (unsigned char *)l.transposed_align_workspace_gpu, k, n, l.bit_align, new_ldb, 8);

                {
                    gemm_nn_custom_bin_mean_transposed_gpu(m, n, k,
                        (unsigned char *)l.align_bit_weights_gpu, new_ldb, (unsigned char *)l.transposed_align_workspace_gpu,
                        new_ldb, l.output_gpu, n, l.mean_arr_gpu, l.biases_gpu, l.activation == LEAKY,
                        l.bin_conv_shortcut_in_gpu, l.bin_conv_shortcut_out_gpu);
                }
            }

            /* Activation (LEAKY was already fused into the bit-GEMM above). */
            if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
            else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
            else if (l.activation == NORM_CHAN) activate_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu);
            else if (l.activation == NORM_CHAN_SOFTMAX) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu);
            else if (l.activation != LINEAR && l.activation != LEAKY) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
            return;
        }
    }

    if (l.xnor) {
        /* Slow XNOR path: binarize the input, then run the normal convolution on it. */
        swap_binary(&l);
        binarize_gpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
        state.input = l.binary_input_gpu;
    }

#ifdef CUDNN
    //float one = 1;    // alpha[0], beta[0] is float for HALF and FLOAT
    float alpha = 1, beta = 0;

    int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions);
    if (state.index != 0 && state.net.cudnn_half && !l.xnor && (!state.train || iteration_num > 3*state.net.burn_in) &&
        (l.c / l.groups) % 8 == 0 && l.n % 8 == 0 && !state.train)
    {
        /* FP16 (Tensor Core) inference path; channel counts must be multiples of 8.
           See cuDNN docs on CUDNN_TENSOR_OP_MATH / *_ALGO_IMPLICIT_PRECOMP_GEMM. */
        const size_t input16_size = l.batch*l.c*l.w*l.h;
        const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w;

        /* Grow the network-wide shared FP16 scratch buffers on demand. */
        if (*state.net.max_input16_size < input16_size) {
            *state.net.max_input16_size = input16_size;
            if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu);
            assert(*state.net.max_input16_size > 0);
            *state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size);
        }
        float *input16 = *state.net.input16_gpu;

        if (*state.net.max_output16_size < output16_size) {
            *state.net.max_output16_size = output16_size;
            if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
            assert(*state.net.max_output16_size > 0);
            *state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size);
        }
        float *output16 = *state.net.output16_gpu;

        assert(input16_size > 0);
        cuda_convert_f32_to_f16(state.input, input16_size, input16);

        CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(),
            &alpha,
            l.srcTensorDesc16,
            input16,
            l.weightDesc16,
            l.weights_gpu16,
            l.convDesc,
            l.fw_algo16,
            state.workspace,
            l.workspace_size,
            &beta,
            l.dstTensorDesc16,
            output16));


        if (l.batch_normalize)
        {
            if (state.train) // Training
            {
                simple_copy_ongpu(l.outputs*l.batch / 2, output16, l.x_gpu);

                float one = 1.0f;
                float zero = 0.0f;
                /* Batch-norm runs on FP16 data; statistics and value adjustment
                   stay in FP32 as cuDNN requires. */
                CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle(),
                    CUDNN_BATCHNORM_SPATIAL,
                    &one,
                    &zero,
                    l.normDstTensorDescF16,
                    l.x_gpu,                // input
                    l.normDstTensorDescF16,
                    output16,               // output
                    l.normTensorDesc,
                    l.scales_gpu,           // input
                    l.biases_gpu,           // input
                    .01,
                    l.rolling_mean_gpu,     // input/output (FP32)
                    l.rolling_variance_gpu, // input/output (FP32)
                    .00001,
                    l.mean_gpu,             // output (FP32) - cached for backward
                    l.variance_gpu));       // output (FP32) - cached for backward

                cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
            }
            else // Detection
            {
                cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
                normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
                scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
                add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
            }
        }
        else // BIAS only
        {
            cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
            add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
        }
    }
    else {
        /* FP32 cuDNN path. */
        CHECK_CUDNN(cudnnConvolutionForward(cudnn_handle(),
            &alpha,
            l.srcTensorDesc,
            state.input,
            l.weightDesc,
            l.weights_gpu,
            l.convDesc,
            l.fw_algo,
            state.workspace,
            l.workspace_size,
            &beta,
            l.dstTensorDesc,
            l.output_gpu));

        if (l.batch_normalize) {
            forward_batchnorm_layer_gpu(l, state);
        }
        else {
            add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
        }
    }

#else
    /* No cuDNN: grouped im2col + GEMM fallback. */
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);

    int i, j;
    int m = l.n / l.groups;
    int k = l.size*l.size*l.c / l.groups;
    int n = l.out_w*l.out_h;
    for(i = 0; i < l.batch; ++i){
        for (j = 0; j < l.groups; ++j) {
            float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w;
            float *a = l.weights_gpu + j*l.nweights / l.groups;
            float *b = state.workspace;
            float *c = l.output_gpu + (i*l.groups + j)*n*m;
            if (l.size == 1) {
                /* 1x1 convolution: the input is already in GEMM layout. */
                b = im;
            }
            else {
                im2col_gpu_ext(im,          // input
                    l.c / l.groups,         // input channels
                    l.h, l.w,               // input size (h, w)
                    l.size, l.size,         // kernel size (h, w)
                    l.pad, l.pad,           // padding (h, w)
                    l.stride_y, l.stride_x, // stride (h, w)
                    l.dilation, l.dilation, // dilation (h, w)
                    state.workspace);       // output
            }
            gemm_ongpu(0, 0, m, n, k, 1, a, k, b, n, 1, c, n);
        }
    }

    if (l.batch_normalize) {
        forward_batchnorm_layer_gpu(l, state);
    }
    else {
        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
    }
#endif

    if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else if (l.activation == NORM_CHAN) activate_array_normalize_channels_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu);
    else if (l.activation == NORM_CHAN_SOFTMAX) activate_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.output_gpu);
    else if (l.activation != LINEAR) activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);

    if(l.binary || l.xnor) swap_binary(&l);

    if (state.net.try_fix_nan) {
        fix_nan_and_inf(l.output_gpu, l.outputs*l.batch);
    }

    if(l.assisted_excitation && state.train) assisted_excitation_forward_gpu(l, state);

    if (l.antialiasing) {
        /* Run the auxiliary antialiasing sub-layer on this layer's output, then
           swap results so l.output_gpu holds the filtered activations and the
           raw ones are preserved in l.input_antialiasing_gpu. */
        network_state s = { 0 };
        s.train = state.train;
        s.workspace = state.workspace;
        s.net = state.net;
        if (!state.train) s.index = state.index;  // don't use TC for training (especially without cuda_convert_f32_to_f16() )
        s.input = l.output_gpu;
        forward_convolutional_layer_gpu(*(l.input_layer), s);

        simple_copy_ongpu(l.outputs*l.batch, l.output_gpu, l.input_antialiasing_gpu);
        simple_copy_ongpu(l.input_layer->outputs*l.input_layer->batch, l.input_layer->output_gpu, l.output_gpu);
    }
}

/* Backward pass of a convolutional layer on the GPU. */
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
{
    if (l.antialiasing) {
        /* Back-propagate first through the antialiasing sub-layer. */
        network_state s = { 0 };
        s.train = state.train;
        s.workspace = state.workspace;
        s.net = state.net;
        s.delta = l.delta_gpu;  // s.delta will be returned to l.delta_gpu
        s.input = l.input_antialiasing_gpu;
        //if (!state.train) s.index = state.index;  // don't use TC for training (especially without cuda_convert_f32_to_f16() )

        simple_copy_ongpu(l.input_layer->outputs*l.input_layer->batch, l.delta_gpu, l.input_layer->delta_gpu);

        backward_convolutional_layer_gpu(*(l.input_layer), s);

        simple_copy_ongpu(l.outputs*l.batch, l.input_antialiasing_gpu, l.output_gpu);
    }

    if(state.net.try_fix_nan) constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);

    if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else if (l.activation ==
NORM_CHAN_SOFTMAX) gradient_array_normalize_channels_softmax_ongpu(l.output_gpu, l.outputs*l.batch, l.batch, l.out_c, l.out_w*l.out_h, l.delta_gpu); else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); if (!l.batch_normalize) backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); //#ifndef CUDNN_HALF //if(l.batch_normalize){ // backward_batchnorm_layer_gpu(l, state); //} else { // //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); //} //#endif // no CUDNN_HALF float *original_input = state.input; if(l.xnor) state.input = l.binary_input_gpu; #ifdef CUDNN float one = 1.f; float alpha = 1, beta = 0; //#ifdef CUDNN_HALF int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions); if (state.index != 0 && state.net.cudnn_half && !l.xnor && (!state.train || iteration_num > 3*state.net.burn_in) && (l.c / l.groups) % 8 == 0 && l.n % 8 == 0 && !state.train) { const size_t input16_size = l.batch*l.c*l.w*l.h; const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h; if (*state.net.max_input16_size < input16_size) { *state.net.max_input16_size = input16_size; if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu); assert(*state.net.max_input16_size > 0); *state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size); } float *input16 = *state.net.input16_gpu; if (*state.net.max_output16_size < delta16_size) { *state.net.max_output16_size = delta16_size; if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu); assert(*state.net.max_output16_size > 0); *state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size); } float *delta16 = *state.net.output16_gpu; assert(input16_size > 0); assert(delta16_size > 0); cuda_convert_f32_to_f16(state.input, input16_size, input16); cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16); if (l.batch_normalize) { //if (!state.train) { // 
l.mean_gpu = l.rolling_mean_gpu; // l.variance_gpu = l.rolling_variance_gpu; //} float one = 1.0f; float zero = 0.0f; CHECK_CUDNN(cudnnBatchNormalizationBackward(cudnn_handle(), CUDNN_BATCHNORM_SPATIAL, &one, &zero, &one, &one, l.normDstTensorDescF16, l.x_gpu, // input (input in BN-forward-inference) l.normDstTensorDescF16, delta16, // input l.normDstTensorDescF16, l.x_norm_gpu, // output (new delta) l.normTensorDesc, l.scales_gpu, // input (should be FP32) l.scale_updates_gpu, // output (should be FP32) l.bias_updates_gpu, // output (should be FP32) .00001, l.mean_gpu, // input (should be FP32) l.variance_gpu)); // input (should be FP32) simple_copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, delta16); //copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1); //cudaMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), cudaMemcpyDefault, get_cuda_stream()); } else { //backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); } // convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16 // get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16) // calculate conv weight updates // Already: l.weight_updates_gpu = (l.weight_updates_gpu - l.weight*decay*batch*subdivision)*momentum // so we should copy f32 to f16, or compute: f16=(w_up - w*d*b*s)*m assert((l.nweights) > 0); cuda_convert_f32_to_f16(l.weight_updates_gpu, l.nweights, l.weight_updates_gpu16); CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc16, input16, //state.input, l.ddstTensorDesc16, delta16, //l.delta_gpu, l.convDesc, l.bf_algo16, state.workspace, l.workspace_size, &one, l.dweightDesc16, l.weight_updates_gpu16)); // l.weight_updates_gpu); cuda_convert_f16_to_f32(l.weight_updates_gpu16, l.nweights, l.weight_updates_gpu); if (state.delta) { if (l.binary || l.xnor) swap_binary(&l); // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData // 
calculate delta for the next layer // convert input: l.weights_gpu (w), l.delta_gpu (dy) from fp32 to fp16 // get output: state.delta (dx) and convert it to fp32 (ONLY if it is fp16) CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(), &alpha, l.weightDesc16, l.weights_gpu16, //l.weights_gpu, l.ddstTensorDesc16, delta16, //l.delta_gpu, l.convDesc, l.bd_algo16, state.workspace, l.workspace_size, &beta, l.dsrcTensorDesc16, input16)); // state.delta); cuda_convert_f16_to_f32(input16, input16_size, state.delta); if (l.binary || l.xnor) swap_binary(&l); if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta); } } else { //#else // CUDNN_HALF if(l.batch_normalize){ backward_batchnorm_layer_gpu(l, state); } // calculate conv weight updates // if used: beta=1 then loss decreases faster CHECK_CUDNN(cudnnConvolutionBackwardFilter(cudnn_handle(), &one, l.srcTensorDesc, state.input, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bf_algo, state.workspace, l.workspace_size, &one, l.dweightDesc, l.weight_updates_gpu)); if (state.delta) { if (l.binary || l.xnor) swap_binary(&l); // http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnConvolutionBackwardData // calculate delta for the next layer CHECK_CUDNN(cudnnConvolutionBackwardData(cudnn_handle(), &one, l.weightDesc, l.weights_gpu, l.ddstTensorDesc, l.delta_gpu, l.convDesc, l.bd_algo, state.workspace, l.workspace_size, &one, l.dsrcTensorDesc, state.delta)); if (l.binary || l.xnor) swap_binary(&l); if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta); } } //#endif // CUDNN_HALF #else // CUDNN if (l.batch_normalize) { backward_batchnorm_layer_gpu(l, state); } int m = l.n / l.groups; int n = l.size*l.size*l.c / l.groups; int k = l.out_w*l.out_h; int i, j; for(i = 0; i < l.batch; ++i){ for (j = 0; j < l.groups; ++j) { float * a = l.delta_gpu + (i*l.groups + j)*m*k; float * b = state.workspace; float * c = l.weight_updates_gpu + 
j*l.nweights / l.groups; float *im = state.input + (i*l.groups + j)*l.c / l.groups*l.h*l.w; //im2col_ongpu(im, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, state.workspace); im2col_gpu_ext(im, // input l.c / l.groups, // input channels l.h, l.w, // input size (h, w) l.size, l.size, // kernel size (h, w) l.pad, l.pad, // padding (h, w) l.stride_y, l.stride_x, // stride (h, w) l.dilation, l.dilation, // dilation (h, w) state.workspace); // output //gemm_ongpu(0, 1, m, n, k, 1, a + i*m*k, k, b, k, 1, c, n); gemm_ongpu(0, 1, m, n, k, 1, a, k, b, k, 1, c, n); if (state.delta) { if (l.binary || l.xnor) swap_binary(&l); float * a = l.weights_gpu + j*l.nweights / l.groups; float * b = l.delta_gpu + (i*l.groups + j)*m*k; float * c = state.workspace; //gemm_ongpu(1, 0, n, k, m, 1, a, n, b + i*k*m, k, 0, c, k); gemm_ongpu(1, 0, n, k, m, 1, a, n, b, k, 0, c, k); float *delta = state.delta + (i*l.groups + j)*l.c / l.groups*l.h*l.w; //col2im_ongpu(state.workspace, l.c / l.groups, l.h, l.w, l.size, l.stride, l.pad, delta); col2im_gpu_ext( state.workspace, // input l.c / l.groups, // input channels l.h, l.w, // input size (h, w) l.size, l.size, // kernel size (h, w) l.pad, l.pad, // padding size (h, w) l.stride_y, l.stride_x, // stride size (h, w) l.dilation, l.dilation, // dilation size (h, w) delta); // output (delta) if (l.binary || l.xnor) { swap_binary(&l); } if (l.xnor) gradient_array_ongpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, state.delta + i*l.c*l.h*l.w); } } } #endif if (state.net.try_fix_nan) { if (state.delta) { fix_nan_and_inf(state.delta, l.inputs * l.batch); } int size = l.nweights; fix_nan_and_inf(l.weight_updates_gpu, size); fix_nan_and_inf(l.weights_gpu, size); } } __global__ void calc_avg_activation_kernel(float *src, float *dst, int size, int channels, int batches) { int i = blockIdx.x * blockDim.x + threadIdx.x; int xy = i % size; int b = i / size; if (i < size*batches) { dst[i] = 0; for (int c = 0; c < channels; ++c) { dst[i] += src[xy + 
size*(c + channels*b)]; } dst[i] = dst[i] / channels; } } void calc_avg_activation_gpu(float *src, float *dst, int size, int channels, int batches) { const int num_blocks = get_number_of_blocks(size*batches, BLOCK); calc_avg_activation_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (src, dst, size, channels, batches); } __global__ void assisted_activation_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches) { int i = blockIdx.x * blockDim.x + threadIdx.x; int xy = i % size; int b = i / size; if (b < batches) { for (int c = 0; c < channels; ++c) { output[xy + size*(c + channels*b)] += alpha * gt_gpu[i] * a_avg_gpu[i]; //output[xy + size*(c + channels*b)] += gt_gpu[i] * a_avg_gpu[i]; //output[xy + size*(c + channels*b)] += gt_gpu[i] * output[xy + size*(c + channels*b)]; //output[xy + size*(c + channels*b)] = a_avg_gpu[i]; } } } void assisted_activation_gpu(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches) { const int num_blocks = get_number_of_blocks(size*batches, BLOCK); assisted_activation_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (alpha, output, gt_gpu, a_avg_gpu, size, channels, batches); } __global__ void assisted_activation2_kernel(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches) { int i = blockIdx.x * blockDim.x + threadIdx.x; int xy = i % size; int b = i / size; float beta = 1 - alpha; if (b < batches) { for (int c = 0; c < channels; ++c) { if(gt_gpu[i] == 0) output[xy + size*(c + channels*b)] *= beta; } } } void assisted_activation2_gpu(float alpha, float *output, float *gt_gpu, float *a_avg_gpu, int size, int channels, int batches) { const int num_blocks = get_number_of_blocks(size*batches, BLOCK); assisted_activation2_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> > (alpha, output, gt_gpu, a_avg_gpu, size, channels, batches); } void 
assisted_excitation_forward_gpu(convolutional_layer l, network_state state) { const int iteration_num = (*state.net.seen) / (state.net.batch*state.net.subdivisions); // epoch //const float epoch = (float)(*state.net.seen) / state.net.train_images_num; // calculate alpha //const float alpha = (1 + cos(3.141592 * iteration_num)) / (2 * state.net.max_batches); //const float alpha = (1 + cos(3.141592 * epoch)) / (2 * state.net.max_batches); float alpha = (1 + cos(3.141592 * iteration_num / state.net.max_batches)) / 2; //float alpha = (1 + cos(3.141592 * iteration_num / state.net.max_batches)); if (l.assisted_excitation == 1) { if (iteration_num > state.net.max_batches / 2) return; } else { if (iteration_num < state.net.burn_in) return; else if (iteration_num > l.assisted_excitation) return; else alpha = (1 + cos(3.141592 * iteration_num / (state.net.burn_in + l.assisted_excitation))) / 2; // from 1 to 0 } //printf("\n epoch = %f, alpha = %f, seen = %d, max_batches = %d, train_images_num = %d \n", // epoch, alpha, (*state.net.seen), state.net.max_batches, state.net.train_images_num); //const int size = l.outputs * l.batch; float *a_avg = (float *)calloc(l.out_w * l.out_h * l.batch, sizeof(float)); float *gt = (float *)calloc(l.out_w * l.out_h * l.batch, sizeof(float)); int b; int w, h; l.max_boxes = state.net.num_boxes; l.truths = l.max_boxes*(4 + 1); int num_truth = l.batch*l.truths; float *truth_cpu = (float *)calloc(num_truth, sizeof(float)); cuda_pull_array(state.truth, truth_cpu, num_truth); //cudaStreamSynchronize(get_cuda_stream()); //CHECK_CUDA(cudaPeekAtLastError()); for (b = 0; b < l.batch; ++b) { // calculate G int t; for (t = 0; t < state.net.num_boxes; ++t) { box truth = float_to_box_stride(truth_cpu + t*(4 + 1) + b*l.truths, 1); if (!truth.x) break; // continue; float beta = 0; //float beta = 1 - alpha; // from 0 to 1 float dw = (1 - truth.w) * beta; float dh = (1 - truth.h) * beta; //printf(" alpha = %f, beta = %f, truth.w = %f, dw = %f, tw+dw = %f, 
l.out_w = %d \n", alpha, beta, truth.w, dw, truth.w+dw, l.out_w); int left = floor((truth.x - (dw + truth.w) / 2) * l.out_w); int right = ceil((truth.x + (dw + truth.w) / 2) * l.out_w); int top = floor((truth.y - (dh + truth.h) / 2) * l.out_h); int bottom = ceil((truth.y + (dh + truth.h) / 2) * l.out_h); if (left < 0) left = 0; if (top < 0) top = 0; if (right > l.out_w) right = l.out_w; if (bottom > l.out_h) bottom = l.out_h; for (w = left; w <= right; w++) { for (h = top; h < bottom; h++) { gt[w + l.out_w * h + l.out_w*l.out_h*b] = 1; } } } } cuda_push_array(l.gt_gpu, gt, l.out_w * l.out_h * l.batch); //cudaStreamSynchronize(get_cuda_stream()); //CHECK_CUDA(cudaPeekAtLastError()); // calc avg_output on GPU - for whole batch calc_avg_activation_gpu(l.output_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch); //cudaStreamSynchronize(get_cuda_stream()); //CHECK_CUDA(cudaPeekAtLastError()); // calc new output //assisted_activation2_gpu(1, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch); // AE3: gt increases (beta = 1 - alpha = 0) //assisted_activation2_gpu(alpha, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch); assisted_activation_gpu(alpha, l.output_gpu, l.gt_gpu, l.a_avg_gpu, l.out_w * l.out_h, l.out_c, l.batch); //cudaStreamSynchronize(get_cuda_stream()); //CHECK_CUDA(cudaPeekAtLastError()); /* for (b = 0; b < l.batch; ++b) { // calculate average A for (w = 0; w < l.out_w; w++) { for (h = 0; h < l.out_h; h++) { for (c = 0; c < l.out_c; c++) { a_avg[w + l.out_w*(h + l.out_h*b)] += l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))]; } a_avg[w + l.out_w*(h + l.out_h*b)] /= l.out_c; // a_avg / d } } } // change activation for (b = 0; b < l.batch; ++b) { for (w = 0; w < l.out_w; w++) { for (h = 0; h < l.out_h; h++) { for (c = 0; c < l.out_c; c++) { // a = a + alpha(t) + e(c,i,j) = a + alpha(t) + g(i,j) * avg_a(i,j) / channels l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))] += alpha * g[w + l.out_w*(h + 
l.out_h*b)] * a_avg[w + l.out_w*(h + l.out_h*b)]; //l.output[w + l.out_w*(h + l.out_h*(c + l.out_c*b))] = // alpha * g[w + l.out_w*(h + l.out_h*b)] * a_avg[w + l.out_w*(h + l.out_h*b)]; } } } } */ if (0) // visualize ground truth { #ifdef OPENCV cuda_pull_array(l.output_gpu, l.output, l.outputs * l.batch); cudaStreamSynchronize(get_cuda_stream()); CHECK_CUDA(cudaPeekAtLastError()); for (b = 0; b < l.batch; ++b) { printf(" Assisted Excitation alpha = %f \n", alpha); image img = float_to_image(l.out_w, l.out_h, 1, &gt[l.out_w*l.out_h*b]); char buff[100]; sprintf(buff, "a_excitation_gt_%d", b); show_image_cv(img, buff); //image img2 = float_to_image(l.out_w, l.out_h, 1, &l.output[l.out_w*l.out_h*l.out_c*b]); image img2 = float_to_image_scaled(l.out_w, l.out_h, 1, &l.output[l.out_w*l.out_h*l.out_c*b]); char buff2[100]; sprintf(buff2, "a_excitation_output_%d", b); show_image_cv(img2, buff2); /* int c = l.out_c; if (c > 4) c = 4; image img3 = float_to_image(l.out_w, l.out_h, c, &l.output[l.out_w*l.out_h*l.out_c*b]); image dc = collapse_image_layers(img3, 1); char buff3[100]; sprintf(buff3, "a_excitation_act_collapsed_%d", b); show_image_cv(dc, buff3); */ wait_key_cv(5); } wait_until_press_key_cv(); #endif // OPENCV } free(truth_cpu); free(gt); free(a_avg); } void pull_convolutional_layer(convolutional_layer l) { cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights); cuda_pull_array_async(l.biases_gpu, l.biases, l.n); cuda_pull_array_async(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_pull_array_async(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array_async(l.scales_gpu, l.scales, l.n); cuda_pull_array_async(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array_async(l.rolling_variance_gpu, l.rolling_variance, l.n); } if (l.adam){ cuda_pull_array_async(l.m_gpu, l.m, l.nweights); cuda_pull_array_async(l.v_gpu, l.v, l.nweights); } CHECK_CUDA(cudaPeekAtLastError()); cudaStreamSynchronize(get_cuda_stream()); } void 
push_convolutional_layer(convolutional_layer l) { cuda_push_array(l.weights_gpu, l.weights, l.nweights); #ifdef CUDNN_HALF assert(l.nweights > 0); cuda_convert_f32_to_f16(l.weights_gpu, l.nweights, l.weights_gpu16); #endif cuda_push_array(l.biases_gpu, l.biases, l.n); if (l.train) { cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights); cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); } if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } if (l.adam){ cuda_push_array(l.m_gpu, l.m, l.nweights); cuda_push_array(l.v_gpu, l.v, l.nweights); } CHECK_CUDA(cudaPeekAtLastError()); } void update_convolutional_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay) { float learning_rate = learning_rate_init*l.learning_rate_scale; //float momentum = a.momentum; //float decay = a.decay; //int batch = a.batch; fix_nan_and_inf(l.weight_updates_gpu, l.nweights); fix_nan_and_inf(l.weights_gpu, l.nweights); if (l.adam) { //adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.nweights, batch, l.t); adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.n, batch, l.t); if (l.scales_gpu) { adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, l.B1, l.B2, l.eps, decay, learning_rate, l.n, batch, l.t); } } else { //axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); //axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); //scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, 
l.weight_updates_gpu, 1); axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1); axpy_ongpu(l.n, learning_rate / batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); scal_ongpu(l.n, momentum, l.bias_updates_gpu, 1); if (l.scales_gpu) { axpy_ongpu(l.n, learning_rate / batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); scal_ongpu(l.n, momentum, l.scale_updates_gpu, 1); } } //if (l.clip) { // constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1); //} } /* void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay) { int size = layer.size*layer.size*layer.c*layer.n; axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1); scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1); if(layer.scales_gpu){ axpy_ongpu(layer.n, learning_rate/batch, layer.scale_updates_gpu, 1, layer.scales_gpu, 1); scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1); } if(layer.adam){ scal_ongpu(size, layer.B1, layer.m_gpu, 1); scal_ongpu(size, layer.B2, layer.v_gpu, 1); axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1); axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1); mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1); axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1); adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1); fill_ongpu(size, 0, layer.weight_updates_gpu, 1); }else{ axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1); // wu = wu - w*decay*batch axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1); // w = w + wu*lr/batch scal_ongpu(size, momentum, layer.weight_updates_gpu, 1); // wu = wu*momentum // wu = (wu - w*decay*batch)*momentum // w = w + (wu - 
w*decay*batch)*lr/batch = w + wu*lr/batch - w*decay*lr = w*(1-decay*lr) + wu*lr/batch //wu_prev = (wu_old - w_old*decay*batch)*momentum //weights_update = weights_update_new + (weights_update_old - weights_old*decay*batch)*momentum - weights_new*decay*batch = // = weights_update_new + weights_update_old*momentum - weights_old*decay*batch*momentum - weights_new*decay*batch // = weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch //------------- RESULT -------------- // weights_update = weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch //----------------------------------- // weights_newest = weights_new + (weights_update_new + weights_update_old*momentum - (weights_old*momentum + weights_new)*decay*batch)*lr/batch // = weights_new + weights_update_new*lr/batch + weights_update_old*momentum*lr/batch - weights_old*momentum*decay*batch*lr/batch - weights_new*decay*batch*lr/batch // = weights_new + weights_update_new*lr/batch + weights_update_old*momentum*lr/batch - weights_old*momentum*decay*lr - weights_new*decay*lr // = weights_new*(1 - decay*lr) - weights_old*momentum*decay*lr + (weights_update_new + weights_update_old*momentum)*lr/batch //------------- RESULT -------------- // weights_newest = weights_new*(1 - decay*lr) - weights_old*momentum*(decay*lr) + (weights_update_new + weights_update_old*momentum)*lr/batch = // = weights_new - (weights_new + weights_old*momentum)*decay*lr + (weights_update_new + weights_update_old*momentum)*lr / batch //----------------------------------- } } */
911f28d433083bde7a52164548f21b3cd9bc6c74.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "maxpool.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

// Candidate launch configurations to sweep: {BLOCKX, BLOCKY}.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem sizes to sweep: {XSIZE, YSIZE}.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Micro-benchmark driver: for each matrix size and each block shape, launches
// the maxpool kernel (defined in maxpool.cu), does 10 warm-up launches, then
// times 1000 launches and prints "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
// argv[1] = number of matrix sizes to sweep (expected 1..7).
int main(int argc, char **argv) {
    hipSetDevice(0);
    if (argc < 2) {
        // BUG FIX: the original read argv[1] unconditionally and would
        // dereference a null pointer when run without arguments.
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // BUG FIX: the original passed XSIZE*YSIZE as the byte count for
            // float buffers, under-allocating by sizeof(float); the kernel
            // would then write out of bounds.
            float *input = NULL;
            hipMalloc(&input, XSIZE * YSIZE * sizeof(float));
            float *output = NULL;
            hipMalloc(&output, XSIZE * YSIZE * sizeof(float));
            const int input_size = 1;
            const int filter_size = 1;
            // Round the grid dimensions up so the grid covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // no-op free: forces context creation before the first launch
            hipLaunchKernelGGL(( maxpool), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, input_size, filter_size);
            hipDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( maxpool), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, input_size, filter_size);
            }
            hipDeviceSynchronize(); // drain warm-up work before starting the clock
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( maxpool), dim3(gridBlock), dim3(threadBlock), 0, 0, input, output, input_size, filter_size);
            }
            // BUG FIX: kernel launches are asynchronous; without this sync the
            // timer measured only launch overhead, not kernel execution time.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked both device buffers on every one of
            // the 7*20 sweep iterations.
            hipFree(input);
            hipFree(output);
        }
    }
}
911f28d433083bde7a52164548f21b3cd9bc6c74.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "maxpool.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

// Candidate launch configurations to sweep: {BLOCKX, BLOCKY}.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem sizes to sweep: {XSIZE, YSIZE}.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

// Micro-benchmark driver: for each matrix size and each block shape, launches
// the maxpool kernel (defined in maxpool.cu), does 10 warm-up launches, then
// times 1000 launches and prints "[usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]".
// argv[1] = number of matrix sizes to sweep (expected 1..7).
int main(int argc, char **argv) {
    cudaSetDevice(0);
    if (argc < 2) {
        // BUG FIX: the original read argv[1] unconditionally and would
        // dereference a null pointer when run without arguments.
        fprintf(stderr, "usage: %s <matrix_len>\n", argv[0]);
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // BUG FIX: the original passed XSIZE*YSIZE as the byte count for
            // float buffers, under-allocating by sizeof(float); the kernel
            // would then write out of bounds.
            float *input = NULL;
            cudaMalloc(&input, XSIZE * YSIZE * sizeof(float));
            float *output = NULL;
            cudaMalloc(&output, XSIZE * YSIZE * sizeof(float));
            const int input_size = 1;
            const int filter_size = 1;
            // Round the grid dimensions up so the grid covers the whole matrix.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // no-op free: forces context creation before the first launch
            maxpool<<<gridBlock, threadBlock>>>(input, output, input_size, filter_size);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                maxpool<<<gridBlock, threadBlock>>>(input, output, input_size, filter_size);
            }
            cudaDeviceSynchronize(); // drain warm-up work before starting the clock
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                maxpool<<<gridBlock, threadBlock>>>(input, output, input_size, filter_size);
            }
            // BUG FIX: kernel launches are asynchronous; without this sync the
            // timer measured only launch overhead, not kernel execution time.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: the original leaked both device buffers on every one of
            // the 7*20 sweep iterations.
            cudaFree(input);
            cudaFree(output);
        }
    }
}
65fa58f16ad65bf6bca16fb975aa839e35cbd060.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

// Block-level integer sum reduction using dynamic shared memory.
// Preconditions (established by the launch in main below):
//  - gridDim.x * blockDim.x equals the number of elements in in_data
//    (there is no bounds check on the load);
//  - blockDim.x is a power of two (the doubling stride assumes it);
//  - dynamic shared memory of blockDim.x * sizeof(int) is provided.
// Writes one partial sum per block into out_data[blockIdx.x].
__global__ void Reduce(int* in_data, int* out_data) {
    extern __shared__ int shared_data[];
    unsigned int tid = threadIdx.x;
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Stage this block's slice into shared memory.
    shared_data[tid] = in_data[index];
    __syncthreads();
    // Interleaved-addressing tree reduction: each round, thread tid folds
    // element 2*s*tid + s into element 2*s*tid. Indexing by i (rather than
    // branching on tid % (2*s)) keeps active threads packed in low lanes.
    for (unsigned int s = 1; s < blockDim.x; s *= 2) {
        int i = 2 * s * tid;
        if (i < blockDim.x) {
            shared_data[i] += shared_data[i + s];
        }
        // Barrier is outside the divergent branch, reached by all threads.
        __syncthreads();
    }
    // Thread 0 publishes the block's partial sum.
    if (tid == 0) {
        out_data[blockIdx.x] = shared_data[0];
    }
}

// Sums 2^20 ones on the GPU (one partial sum per block, final sum on the
// host), timing the kernel with HIP events, and prints the elapsed time
// followed by the total (expected: 1048576).
int main() {
    const int block_size = 1024;
    const int array_size = 1 << 20; // divisible by block_size, so no tail block
    int* h_array = new int[array_size];
    for (int i = 0; i < array_size; ++i) {
        h_array[i] = 1;
    }
    int* d_array;
    hipMalloc(&d_array, sizeof(int) * array_size);
    hipMemcpy(d_array, h_array, sizeof(int) * array_size, hipMemcpyHostToDevice);
    int num_blocks = array_size / block_size;
    int* d_blocksum;
    hipMalloc(&d_blocksum, sizeof(int) * num_blocks);
    int* h_blocksum = new int[num_blocks];
    hipEvent_t start;
    hipEvent_t stop;
    // Creating event
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);
    hipLaunchKernelGGL(( Reduce), dim3(num_blocks), dim3(block_size), sizeof(int) * block_size, 0, d_array, d_blocksum);
    hipEventRecord(stop);
    // Blocking copy of the per-block partial sums back to the host.
    hipMemcpy(h_blocksum, d_blocksum, sizeof(int) * num_blocks, hipMemcpyDeviceToHost);
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    std::cout << milliseconds << " elapsed" << std::endl;
    // Final reduction of the per-block sums on the host.
    int sum = 0;
    for (int i = 0; i < num_blocks; ++i) {
        sum += h_blocksum[i];
    }
    std::cout << sum << std::endl;
    // BUG FIX: the original leaked both event handles.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_blocksum);
    hipFree(d_array);
    delete[] h_array;
    delete[] h_blocksum;
}
65fa58f16ad65bf6bca16fb975aa839e35cbd060.cu
#include <iostream>

// Block-level integer sum reduction using dynamic shared memory.
// Preconditions (established by the launch in main below):
//  - gridDim.x * blockDim.x equals the number of elements in in_data
//    (there is no bounds check on the load);
//  - blockDim.x is a power of two (the doubling stride assumes it);
//  - dynamic shared memory of blockDim.x * sizeof(int) is provided.
// Writes one partial sum per block into out_data[blockIdx.x].
__global__ void Reduce(int* in_data, int* out_data) {
    extern __shared__ int shared_data[];
    unsigned int tid = threadIdx.x;
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Stage this block's slice into shared memory.
    shared_data[tid] = in_data[index];
    __syncthreads();
    // Interleaved-addressing tree reduction: each round, thread tid folds
    // element 2*s*tid + s into element 2*s*tid. Indexing by i (rather than
    // branching on tid % (2*s)) keeps active threads packed in low lanes.
    for (unsigned int s = 1; s < blockDim.x; s *= 2) {
        int i = 2 * s * tid;
        if (i < blockDim.x) {
            shared_data[i] += shared_data[i + s];
        }
        // Barrier is outside the divergent branch, reached by all threads.
        __syncthreads();
    }
    // Thread 0 publishes the block's partial sum.
    if (tid == 0) {
        out_data[blockIdx.x] = shared_data[0];
    }
}

// Sums 2^20 ones on the GPU (one partial sum per block, final sum on the
// host), timing the kernel with CUDA events, and prints the elapsed time
// followed by the total (expected: 1048576).
int main() {
    const int block_size = 1024;
    const int array_size = 1 << 20; // divisible by block_size, so no tail block
    int* h_array = new int[array_size];
    for (int i = 0; i < array_size; ++i) {
        h_array[i] = 1;
    }
    int* d_array;
    cudaMalloc(&d_array, sizeof(int) * array_size);
    cudaMemcpy(d_array, h_array, sizeof(int) * array_size, cudaMemcpyHostToDevice);
    int num_blocks = array_size / block_size;
    int* d_blocksum;
    cudaMalloc(&d_blocksum, sizeof(int) * num_blocks);
    int* h_blocksum = new int[num_blocks];
    cudaEvent_t start;
    cudaEvent_t stop;
    // Creating event
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    Reduce<<<num_blocks, block_size, sizeof(int) * block_size>>>(d_array, d_blocksum);
    cudaEventRecord(stop);
    // Blocking copy of the per-block partial sums back to the host.
    cudaMemcpy(h_blocksum, d_blocksum, sizeof(int) * num_blocks, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << milliseconds << " elapsed" << std::endl;
    // Final reduction of the per-block sums on the host.
    int sum = 0;
    for (int i = 0; i < num_blocks; ++i) {
        sum += h_blocksum[i];
    }
    std::cout << sum << std::endl;
    // BUG FIX: the original leaked both event handles.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_blocksum);
    cudaFree(d_array);
    delete[] h_array;
    delete[] h_blocksum;
}
8674344e4c7c6ceb8d1787159d3e759bdf1688b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Looooong compile time. * * Authors: Luc Grosheintz <forbugrep@zoho.com> * Date: 2015-03-17 */ #include "call_back.cuh" __global__ void foo(double x) { S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); printf("%e\n", x); } int main(int argc, char *argv[]){ hipLaunchKernelGGL(( foo), dim3(1), dim3(1), 0, 0, 0.2); return 0; }
8674344e4c7c6ceb8d1787159d3e759bdf1688b5.cu
/* Looooong compile time. * * Authors: Luc Grosheintz <forbugrep@zoho.com> * Date: 2015-03-17 */ #include "call_back.cuh" __global__ void foo(double x) { S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); S::evil(x); printf("%e\n", x); } int main(int argc, char *argv[]){ foo<<<1, 1>>>(0.2); return 0; }
2a80eac14de3e3f14c837b1ed52283309f205b8b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/NumericLimits.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/adaptive_max_pool2d_backward_native.h> #include <ATen/ops/adaptive_max_pool2d_native.h> #include <ATen/ops/empty.h> #endif #include <algorithm> #include <cfloat> #include <cmath> namespace at::native { namespace { __device__ inline int64_t start_index(int64_t a, int64_t b, int64_t c) { return (a / b) * c + ((a % b) * c) / b; } __device__ inline int64_t end_index(int64_t a, int64_t b, int64_t c) { return 1 + ((a + 1) * c - 1) / b; } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptivemaxpool(T *input, T *output, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; // select input/output plane output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; indices = indices + o_plane*osizeH*osizeW; // For all output pixels... 
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; int argmax = istartH * isizeW + istartW; T max = at::numeric_limits<T>::lower_bound(); // -Infinity int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; if ((val > max) || at::_isnan(val)) { max = val; argmax = (ih+istartH)*isizeW + iw+istartW; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; *ptr_ind = argmax; } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename T> __global__ void adaptivemaxgradinput(T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; //int k = blockIdx.x % sizeD; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); gradInput[argmax] += z; } } } /* * Description: * this function computes the 
gradInput from weight and gradOutput * when kH != dH or kW != dW (uses atomic add) */ template <typename T> __global__ void atomicadaptivemaxgradinput( T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); // atomic add since different threads could update same variable gpuAtomicAddNoReturn(&(gradInput[argmax]), z); } } } } // namespace // 4d tensor B x D x H x W TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cuda) (const Tensor& input, IntArrayRef output_size, const Tensor& output, const Tensor& indices) { TensorArg output_arg{output, "output", 1}; TensorArg indices_arg{indices, "indices", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( __func__, {output_arg, indices_arg, input_arg}); if (input.numel() == 0) { return; } int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; const at::Tensor output_c = output.is_contiguous() ? output : at::empty(output.sizes(), output.options()); const at::Tensor indices_c = indices.is_contiguous() ? 
indices : at::empty(indices.sizes(), indices.options()); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t istrideD = input.stride(0); int64_t istrideH = input.stride(1); int64_t istrideW = input.stride(2); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output_c.data_ptr<scalar_t>(); int64_t* indices_data = indices_c.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } else { Tensor input_ = input.contiguous(); int64_t sizeB = input_.size(0); int64_t sizeD = input_.size(1); int64_t isizeH = input_.size(2); int64_t isizeW = input_.size(3); int64_t istrideD = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input_.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input_.data_ptr<scalar_t>(); scalar_t* output_data = output_c.data_ptr<scalar_t>(); int64_t* indices_data = indices_c.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } if (!output.is_contiguous()) { output.copy_(output_c); } if (!indices.is_contiguous()) { indices.copy_(indices_c); } } TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_cuda) (const Tensor& gradOutput, const Tensor& input, const Tensor& indices, const Tensor& gradInput) { globalContext().alertNotDeterministic( "adaptive_max_pool2d_backward_cuda"); TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput, "gradOutput", 2}; TensorArg input_arg{input, "input", 3}; TensorArg indices_arg{indices, "indices", 4}; checkAllSameGPU( __func__, {grad_input_arg, grad_output_arg, input_arg, indices_arg}); if (gradOutput.numel() == 0) { return; } bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests const at::Tensor gradOutput_ = gradOutput.contiguous(); const at::Tensor indices_ = indices.contiguous(); const at::Tensor gradInput_c = gradInput.is_contiguous() ? gradInput : at::empty(gradInput.sizes(), gradInput.options()); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t osizeH = gradOutput_.size(1); int64_t osizeW = gradOutput_.size(2); // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); gradInput_c.zero_(); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput_c.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput_.data_ptr<scalar_t>(); int64_t* indices_data = indices_.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if (atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel hipLaunchKernelGGL(( atomicadaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } else { int64_t sizeB = input.size(0); int64_t sizeD = input.size(1); int64_t isizeH = input.size(2); int64_t isizeW = input.size(3); int64_t osizeH = gradOutput_.size(2); int64_t osizeW = gradOutput_.size(3); gradInput_c.zero_(); // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput_c.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput_.data_ptr<scalar_t>(); int64_t* indices_data = indices_.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); if (atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( adaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } if (!gradInput.is_contiguous()) { gradInput.copy_(gradInput_c); } } } // namespace at::native
2a80eac14de3e3f14c837b1ed52283309f205b8b.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/Dispatch.h> #include <ATen/NumericUtils.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/adaptive_max_pool2d_backward_native.h> #include <ATen/ops/adaptive_max_pool2d_native.h> #include <ATen/ops/empty.h> #endif #include <algorithm> #include <cfloat> #include <cmath> namespace at::native { namespace { __device__ inline int64_t start_index(int64_t a, int64_t b, int64_t c) { return (a / b) * c + ((a % b) * c) / b; } __device__ inline int64_t end_index(int64_t a, int64_t b, int64_t c) { return 1 + ((a + 1) * c - 1) / b; } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptivemaxpool(T *input, T *output, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; // select input/output plane output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; indices = indices + o_plane*osizeH*osizeW; // For all output pixels... 
for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; int argmax = istartH * isizeW + istartW; T max = at::numeric_limits<T>::lower_bound(); // -Infinity int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; if ((val > max) || at::_isnan(val)) { max = val; argmax = (ih+istartH)*isizeW + iw+istartW; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; *ptr_ind = argmax; } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename T> __global__ void adaptivemaxgradinput(T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; //int k = blockIdx.x % sizeD; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); gradInput[argmax] += z; } } } /* * Description: * this function computes the 
gradInput from weight and gradOutput * when kH != dH or kW != dW (uses atomic add) */ template <typename T> __global__ void atomicadaptivemaxgradinput( T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); // atomic add since different threads could update same variable gpuAtomicAddNoReturn(&(gradInput[argmax]), z); } } } } // namespace // 4d tensor B x D x H x W TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cuda) (const Tensor& input, IntArrayRef output_size, const Tensor& output, const Tensor& indices) { TensorArg output_arg{output, "output", 1}; TensorArg indices_arg{indices, "indices", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( __func__, {output_arg, indices_arg, input_arg}); if (input.numel() == 0) { return; } int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; const at::Tensor output_c = output.is_contiguous() ? output : at::empty(output.sizes(), output.options()); const at::Tensor indices_c = indices.is_contiguous() ? 
indices : at::empty(indices.sizes(), indices.options()); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t istrideD = input.stride(0); int64_t istrideH = input.stride(1); int64_t istrideW = input.stride(2); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output_c.data_ptr<scalar_t>(); int64_t* indices_data = indices_c.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } else { Tensor input_ = input.contiguous(); int64_t sizeB = input_.size(0); int64_t sizeD = input_.size(1); int64_t isizeH = input_.size(2); int64_t isizeW = input_.size(3); int64_t istrideD = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input_.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input_.data_ptr<scalar_t>(); scalar_t* output_data = output_c.data_ptr<scalar_t>(); int64_t* indices_data = indices_c.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } if (!output.is_contiguous()) { output.copy_(output_c); } if (!indices.is_contiguous()) { indices.copy_(indices_c); } } TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_cuda) (const Tensor& gradOutput, const Tensor& input, const Tensor& indices, const Tensor& gradInput) { globalContext().alertNotDeterministic( "adaptive_max_pool2d_backward_cuda"); TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput, "gradOutput", 2}; TensorArg input_arg{input, "input", 3}; TensorArg indices_arg{indices, "indices", 4}; checkAllSameGPU( __func__, {grad_input_arg, grad_output_arg, input_arg, indices_arg}); if (gradOutput.numel() == 0) { return; } bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests const at::Tensor gradOutput_ = gradOutput.contiguous(); const at::Tensor indices_ = indices.contiguous(); const at::Tensor gradInput_c = gradInput.is_contiguous() ? gradInput : at::empty(gradInput.sizes(), gradInput.options()); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t osizeH = gradOutput_.size(1); int64_t osizeW = gradOutput_.size(2); // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); gradInput_c.zero_(); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput_c.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput_.data_ptr<scalar_t>(); int64_t* indices_data = indices_.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if (atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel atomicadaptivemaxgradinput<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } else { int64_t sizeB = input.size(0); int64_t sizeD = input.size(1); int64_t isizeH = input.size(2); int64_t isizeW = input.size(3); int64_t osizeH = gradOutput_.size(2); int64_t osizeW = gradOutput_.size(3); gradInput_c.zero_(); // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput_c.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput_.data_ptr<scalar_t>(); int64_t* indices_data = indices_.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); if (atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel, accumulate gradients atomically adaptivemaxgradinput<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } if (!gradInput.is_contiguous()) { gradInput.copy_(gradInput_c); } } } // namespace at::native
68a51a55aba3bb6d71279eea1a5950ea06b2aacd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define PRECISION_z __global__ void magma_zparilu_csr_kernel( const magma_int_t num_rows, const magma_int_t nnz, const magma_index_t *rowidxA, const magma_index_t *colidxA, const magmaDoubleComplex * __restrict__ A, const magma_index_t *rowptrL, const magma_index_t *colidxL, magmaDoubleComplex *valL, const magma_index_t *rowptrU, const magma_index_t *colidxU, magmaDoubleComplex *valU) { int i, j; int k = blockDim.x * blockIdx.x + threadIdx.x; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex s, sp; int il, iu, jl, ju; if (k < nnz) { i = rowidxA[k]; j = colidxA[k]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) s = __ldg(A+k); #else s = A[k]; #endif il = rowptrL[i]; iu = rowptrU[j]; while (il < rowptrL[i+1] && iu < rowptrU[j+1]) { sp = zero; jl = colidxL[il]; ju = colidxU[iu]; sp = (jl == ju) ? valL[il] * valU[iu] : sp; s = (jl == ju) ? s-sp : s; il = (jl <= ju) ? il+1 : il; iu = (jl >= ju) ? iu+1 : iu; } s += sp; // undo the last operation (it must be the last) if (i > j) // modify l entry valL[il-1] = s / valU[rowptrU[j+1]-1]; else // modify u entry valU[iu-1] = s; } } /** Purpose ------- This routine iteratively computes an incomplete LU factorization. For reference, see: E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization", SIAM Journal on Scientific Computing, 37, C169-C193 (2015). This routine was used in the ISC 2015 paper: E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs", ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015. 
The input format of the system matrix is COO, the lower triangular factor L is stored in CSR, the upper triangular factor U is transposed, then also stored in CSR (equivalent to CSC format for the non-transposed U). Every component of L and U is handled by one thread. Arguments --------- @param[in] A magma_z_matrix input matrix A determing initial guess & processing order @param[in,out] L magma_z_matrix input/output matrix L containing the lower triangular factor @param[in,out] U magma_z_matrix input/output matrix U containing the upper triangular factor @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zparilu_csr( magma_z_matrix A, magma_z_matrix L, magma_z_matrix U, magma_queue_t queue) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid1 = magma_ceildiv(A.nnz, blocksize1); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid(dimgrid1, dimgrid2, dimgrid3); dim3 block(blocksize1, blocksize2, 1); hipLaunchKernelGGL(( magma_zparilu_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() , A.num_rows, A.nnz, A.rowidx, A.col, A.val, L.row, L.col, L.val, U.row, U.col, U.val); return MAGMA_SUCCESS; }
68a51a55aba3bb6d71279eea1a5950ea06b2aacd.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define PRECISION_z __global__ void magma_zparilu_csr_kernel( const magma_int_t num_rows, const magma_int_t nnz, const magma_index_t *rowidxA, const magma_index_t *colidxA, const magmaDoubleComplex * __restrict__ A, const magma_index_t *rowptrL, const magma_index_t *colidxL, magmaDoubleComplex *valL, const magma_index_t *rowptrU, const magma_index_t *colidxU, magmaDoubleComplex *valU) { int i, j; int k = blockDim.x * blockIdx.x + threadIdx.x; magmaDoubleComplex zero = MAGMA_Z_MAKE(0.0, 0.0); magmaDoubleComplex s, sp; int il, iu, jl, ju; if (k < nnz) { i = rowidxA[k]; j = colidxA[k]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) s = __ldg(A+k); #else s = A[k]; #endif il = rowptrL[i]; iu = rowptrU[j]; while (il < rowptrL[i+1] && iu < rowptrU[j+1]) { sp = zero; jl = colidxL[il]; ju = colidxU[iu]; sp = (jl == ju) ? valL[il] * valU[iu] : sp; s = (jl == ju) ? s-sp : s; il = (jl <= ju) ? il+1 : il; iu = (jl >= ju) ? iu+1 : iu; } s += sp; // undo the last operation (it must be the last) if (i > j) // modify l entry valL[il-1] = s / valU[rowptrU[j+1]-1]; else // modify u entry valU[iu-1] = s; } } /** Purpose ------- This routine iteratively computes an incomplete LU factorization. For reference, see: E. Chow and A. Patel: "Fine-grained Parallel Incomplete LU Factorization", SIAM Journal on Scientific Computing, 37, C169-C193 (2015). This routine was used in the ISC 2015 paper: E. Chow et al.: "Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs", ISC High Performance 2015, LNCS 9137, pp. 1-16, 2015. The input format of the system matrix is COO, the lower triangular factor L is stored in CSR, the upper triangular factor U is transposed, then also stored in CSR (equivalent to CSC format for the non-transposed U). 
Every component of L and U is handled by one thread. Arguments --------- @param[in] A magma_z_matrix input matrix A determing initial guess & processing order @param[in,out] L magma_z_matrix input/output matrix L containing the lower triangular factor @param[in,out] U magma_z_matrix input/output matrix U containing the upper triangular factor @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zparilu_csr( magma_z_matrix A, magma_z_matrix L, magma_z_matrix U, magma_queue_t queue) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid1 = magma_ceildiv(A.nnz, blocksize1); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid(dimgrid1, dimgrid2, dimgrid3); dim3 block(blocksize1, blocksize2, 1); magma_zparilu_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>> (A.num_rows, A.nnz, A.rowidx, A.col, A.val, L.row, L.col, L.val, U.row, U.col, U.val); return MAGMA_SUCCESS; }
99c75fe793d499264080ac0df955882d20f66a3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example requires NVIDIA Ampere GPU or later. 
*/ // Standard Library includes #include <iostream> #include <sstream> #include <vector> // CUTLASS Includes #include "cutlass/cutlass.h" #include "cutlass/functional.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" // CUTLASS Utility Includes #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/gemm_complex.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // Define the overal warp-level problem shape int const kM = 27; int const kN = 31; int const kK = 17; /////////////////////////////////////////////////////////////////////////////////////////////////// // Define a warp-level GEMM operator. // // This template could be part of the CUTLASS Template Library or implemented internally. This // wraps the matrix multiply operation and epilogue with a GEMM-like interface that can be // instantiated in device code. 
namespace cutlass { namespace gemm { namespace warp { template < typename Shape, typename InstructionShape, typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementScalar > class GemmTensorOp { public: using WarpShape = GemmShape< ((Shape::kM + InstructionShape::kM - 1) / InstructionShape::kM) * InstructionShape::kM, ((Shape::kN + InstructionShape::kN - 1) / InstructionShape::kN) * InstructionShape::kN, InstructionShape::kK >; using MmaWarp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, double, // Data type of A elements cutlass::layout::RowMajor, // Layout of A matrix double, // Data type of B elements cutlass::layout::ColumnMajor, // Layout of B matrix double, // Data type of C elements cutlass::layout::RowMajor // Layout of C matrix >::Type; // Number of 'K groups' int const kKgroups = (Shape::kK + InstructionShape::kK - 1) / InstructionShape::kK; // Define a 'FragmentIterator' to iterate over slices of accumulators using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp< typename MmaWarp::Shape, InstructionShape, double, typename MmaWarp::Policy::Operator::FragmentC, cutlass::layout::RowMajor >; // Define an epilogue 'Tile Iteterator' to iterate over slices of elements in Shared Memory using AccumulatorTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpCanonical< typename MmaWarp::Shape, InstructionShape, double, cutlass::layout::RowMajor >; using TensorRefA = typename MmaWarp::IteratorA::TensorRef; using TensorRefB = typename MmaWarp::IteratorB::TensorRef; using TensorRefC = typename AccumulatorTileIterator::TensorRef; public: CUTLASS_HOST_DEVICE GemmTensorOp() { } CUTLASS_DEVICE void operator()( ElementScalar alpha, TensorRefA ref_A, TensorRefB ref_B, ElementScalar beta, TensorRefC ref_C, TensorRefC ref_D, int lane_id) const { // Instantiate iterators pointing to slices of the A and B matrices in shared memory typename 
MmaWarp::IteratorA iter_A(ref_A, {Shape::kM, Shape::kK}, lane_id); typename MmaWarp::IteratorB iter_B(ref_B, {Shape::kK, Shape::kN}, lane_id); // Instantiate and clear accumulator tile holding the C matrix typename MmaWarp::FragmentC accum; accum.clear(); // Instantiate the warp-level matrix multiply operator MmaWarp mma_op; // Instantiate fragments holding the slice of the matrix held by each warp typename MmaWarp::FragmentA frag_A[2]; typename MmaWarp::FragmentB frag_B[2]; // Load fragments from shared memory iter_A.load(frag_A[0]); iter_B.load(frag_B[0]); ++iter_A; ++iter_B; // Load fragments from shared memory CUTLASS_PRAGMA_UNROLL for (int k = 0; k < kKgroups; ++k) { // Load fragments from shared memory iter_A.load(frag_A[(k + 1) % 2]); iter_B.load(frag_B[(k + 1) % 2]); ++iter_A; ++iter_B; // Compute the matrix multiply mma_op(accum, frag_A[k % 2], frag_B[k % 2], accum); } // Instantiate iterators FragmentIterator accum_frag_it(accum); AccumulatorTileIterator source_tile_it(ref_C, {Shape::kM, Shape::kN}, lane_id); AccumulatorTileIterator dest_tile_it(ref_D, {Shape::kM, Shape::kN}, lane_id); // Define function objects for linear scaling operation cutlass::multiplies<typename FragmentIterator::Fragment> mul_source; cutlass::multiply_add<typename FragmentIterator::Fragment> mul_add_accumulator; // Iterate over the epilogue components CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < FragmentIterator::kIterations; ++idx) { // Define storage for slices of the accumulators typename FragmentIterator::Fragment accum_fragment; typename FragmentIterator::Fragment source_fragment; // Select a slice of accumulators from the accumulator tile accum_frag_it.load(accum_fragment); ++accum_frag_it; // Load a corresponding slice from Shared memory source_tile_it.load(source_fragment); ++source_tile_it; // Compute linear scaling - alpha * AB + beta * C source_fragment = mul_source(beta, source_fragment); accum_fragment = mul_add_accumulator(alpha, accum_fragment, source_fragment); // 
Store the result to shared memory dest_tile_it.store(accum_fragment); ++dest_tile_it; } } }; } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // Sample kernel demonstrating a collective GEMM operation by a warp on arbitrary matrices held // in Shared Memory. __global__ void kernel( double *D_gmem, double alpha, double const *A_gmem, double const *B_gmem, double beta, double const *C_gmem) { // Define several matrices in shared memory __shared__ double A[kM][kK]; __shared__ double B[kN][kK]; __shared__ double C[kM][kN]; // Copy data into SMEM if (threadIdx.x == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { for (int k = 0; k < kK; ++k) { A[m][k] = A_gmem[m * kK + k]; } } CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { for (int k = 0; k < kK; ++k) { B[n][k] = B_gmem[n * kK + k]; } } CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { C[m][n] = C_gmem[m * kN + n]; } } } __syncthreads(); // // Instantiate a warp-level matrix multiply operator given the fundamental instruction shape (8x8x4), // overall shape, data type of each operand, and layout of each operand. 
// using GemmTensorOp = cutlass::gemm::warp::GemmTensorOp< cutlass::gemm::GemmShape<kM, kN, kK>, cutlass::gemm::GemmShape<8, 8, 4>, double, // Data type of A elements cutlass::layout::RowMajor, // Layout of A matrix double, // Data type of B elements cutlass::layout::ColumnMajor, // Layout of B matrix double, // Data type of C elements cutlass::layout::RowMajor, // Layout of C matrix double // Scalar type of alpha and beta >; // Instantiate the GEMM operator GemmTensorOp gemm; // Execute the warp-level GEMM operation gemm( alpha, {&A[0][0], kK}, {&B[0][0], kK}, beta, {&C[0][0], kN}, {&C[0][0], kN}, threadIdx.x); __syncthreads(); // Copy data into SMEM if (threadIdx.x == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { D_gmem[m * kN + n] = C[m][n]; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to canonical warp-level GEMM operation int main(int argc, const char *arg[]) { bool notSupported = false; // CUTLASS must be compiled with CUDA 11 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "This example requires compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Return 0 so tests are considered passing if run on unsupported platforms. 
return 0; } cutlass::HostTensor<double, cutlass::layout::RowMajor> A({kM, kK}); cutlass::HostTensor<double, cutlass::layout::ColumnMajor> B({kK, kN}); cutlass::HostTensor<double, cutlass::layout::RowMajor> C({kM, kN}); cutlass::HostTensor<double, cutlass::layout::RowMajor> D({kM, kN}); uint64_t seed = 2020; double max = 8; double min = -8; cutlass::reference::host::TensorFillRandomUniform( A.host_view(), seed, max, min, 0 ); cutlass::reference::host::TensorFillRandomUniform( B.host_view(), seed + 17, max, min, 0 ); cutlass::reference::host::TensorFillRandomUniform( C.host_view(), seed + 31, max, min, 0 ); A.sync_device(); B.sync_device(); C.sync_device(); D.sync_device(); dim3 grid(1,1); dim3 block(32, 1, 1); double alpha = 2.25; double beta = 1.24; hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block) , 0, 0, D.device_data(), alpha, A.device_data(), B.device_data(), beta, C.device_data() ); hipError_t result = hipDeviceSynchronize(); if (result != hipSuccess) { std::cerr << "Failed to synchronize device after kernel launch." << std::endl; return -1; } D.sync_host(); // Compute reference on host cutlass::HostTensor<double, cutlass::layout::RowMajor> D_ref({kM, kN}, false); cutlass::reference::host::GemmComplex( {kM, kN, kK}, alpha, A.host_ref(), cutlass::ComplexTransform::kNone, B.host_ref(), cutlass::ComplexTransform::kNone, beta, C.host_ref(), D_ref.host_ref(), double() ); // Verify reference matches computed if (!cutlass::reference::host::TensorEquals( D.host_view(), D_ref.host_view())) { std::cerr << "A =\n" << A.host_view() << "\n\nB = \n" << B.host_view() << "\n\nC = " << C.host_view() << "\n\nRef =\n" << D_ref.host_view() << "\n\nD =\n" << D.host_view() << "\n\n"; std::cerr << "Error - device results mismatch host reference." << std::endl; return -1; } std::cout << "Passed" << std::endl; return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////////
99c75fe793d499264080ac0df955882d20f66a3a.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example requires NVIDIA Ampere GPU or later. 
*/ // Standard Library includes #include <iostream> #include <sstream> #include <vector> // CUTLASS Includes #include "cutlass/cutlass.h" #include "cutlass/functional.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/warp/default_mma_tensor_op.h" #include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" #include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" // CUTLASS Utility Includes #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/host/gemm_complex.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // Define the overal warp-level problem shape int const kM = 27; int const kN = 31; int const kK = 17; /////////////////////////////////////////////////////////////////////////////////////////////////// // Define a warp-level GEMM operator. // // This template could be part of the CUTLASS Template Library or implemented internally. This // wraps the matrix multiply operation and epilogue with a GEMM-like interface that can be // instantiated in device code. 
namespace cutlass { namespace gemm { namespace warp { template < typename Shape, typename InstructionShape, typename ElementA, typename LayoutA, typename ElementB, typename LayoutB, typename ElementC, typename LayoutC, typename ElementScalar > class GemmTensorOp { public: using WarpShape = GemmShape< ((Shape::kM + InstructionShape::kM - 1) / InstructionShape::kM) * InstructionShape::kM, ((Shape::kN + InstructionShape::kN - 1) / InstructionShape::kN) * InstructionShape::kN, InstructionShape::kK >; using MmaWarp = typename cutlass::gemm::warp::DefaultMmaTensorOp< WarpShape, InstructionShape, double, // Data type of A elements cutlass::layout::RowMajor, // Layout of A matrix double, // Data type of B elements cutlass::layout::ColumnMajor, // Layout of B matrix double, // Data type of C elements cutlass::layout::RowMajor // Layout of C matrix >::Type; // Number of 'K groups' int const kKgroups = (Shape::kK + InstructionShape::kK - 1) / InstructionShape::kK; // Define a 'FragmentIterator' to iterate over slices of accumulators using FragmentIterator = cutlass::epilogue::warp::FragmentIteratorTensorOp< typename MmaWarp::Shape, InstructionShape, double, typename MmaWarp::Policy::Operator::FragmentC, cutlass::layout::RowMajor >; // Define an epilogue 'Tile Iteterator' to iterate over slices of elements in Shared Memory using AccumulatorTileIterator = cutlass::epilogue::warp::TileIteratorTensorOpCanonical< typename MmaWarp::Shape, InstructionShape, double, cutlass::layout::RowMajor >; using TensorRefA = typename MmaWarp::IteratorA::TensorRef; using TensorRefB = typename MmaWarp::IteratorB::TensorRef; using TensorRefC = typename AccumulatorTileIterator::TensorRef; public: CUTLASS_HOST_DEVICE GemmTensorOp() { } CUTLASS_DEVICE void operator()( ElementScalar alpha, TensorRefA ref_A, TensorRefB ref_B, ElementScalar beta, TensorRefC ref_C, TensorRefC ref_D, int lane_id) const { // Instantiate iterators pointing to slices of the A and B matrices in shared memory typename 
MmaWarp::IteratorA iter_A(ref_A, {Shape::kM, Shape::kK}, lane_id); typename MmaWarp::IteratorB iter_B(ref_B, {Shape::kK, Shape::kN}, lane_id); // Instantiate and clear accumulator tile holding the C matrix typename MmaWarp::FragmentC accum; accum.clear(); // Instantiate the warp-level matrix multiply operator MmaWarp mma_op; // Instantiate fragments holding the slice of the matrix held by each warp typename MmaWarp::FragmentA frag_A[2]; typename MmaWarp::FragmentB frag_B[2]; // Load fragments from shared memory iter_A.load(frag_A[0]); iter_B.load(frag_B[0]); ++iter_A; ++iter_B; // Load fragments from shared memory CUTLASS_PRAGMA_UNROLL for (int k = 0; k < kKgroups; ++k) { // Load fragments from shared memory iter_A.load(frag_A[(k + 1) % 2]); iter_B.load(frag_B[(k + 1) % 2]); ++iter_A; ++iter_B; // Compute the matrix multiply mma_op(accum, frag_A[k % 2], frag_B[k % 2], accum); } // Instantiate iterators FragmentIterator accum_frag_it(accum); AccumulatorTileIterator source_tile_it(ref_C, {Shape::kM, Shape::kN}, lane_id); AccumulatorTileIterator dest_tile_it(ref_D, {Shape::kM, Shape::kN}, lane_id); // Define function objects for linear scaling operation cutlass::multiplies<typename FragmentIterator::Fragment> mul_source; cutlass::multiply_add<typename FragmentIterator::Fragment> mul_add_accumulator; // Iterate over the epilogue components CUTLASS_PRAGMA_UNROLL for (int idx = 0; idx < FragmentIterator::kIterations; ++idx) { // Define storage for slices of the accumulators typename FragmentIterator::Fragment accum_fragment; typename FragmentIterator::Fragment source_fragment; // Select a slice of accumulators from the accumulator tile accum_frag_it.load(accum_fragment); ++accum_frag_it; // Load a corresponding slice from Shared memory source_tile_it.load(source_fragment); ++source_tile_it; // Compute linear scaling - alpha * AB + beta * C source_fragment = mul_source(beta, source_fragment); accum_fragment = mul_add_accumulator(alpha, accum_fragment, source_fragment); // 
Store the result to shared memory dest_tile_it.store(accum_fragment); ++dest_tile_it; } } }; } // namespace warp } // namespace gemm } // namespace cutlass /////////////////////////////////////////////////////////////////////////////////////////////////// // Sample kernel demonstrating a collective GEMM operation by a warp on arbitrary matrices held // in Shared Memory. __global__ void kernel( double *D_gmem, double alpha, double const *A_gmem, double const *B_gmem, double beta, double const *C_gmem) { // Define several matrices in shared memory __shared__ double A[kM][kK]; __shared__ double B[kN][kK]; __shared__ double C[kM][kN]; // Copy data into SMEM if (threadIdx.x == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { for (int k = 0; k < kK; ++k) { A[m][k] = A_gmem[m * kK + k]; } } CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { for (int k = 0; k < kK; ++k) { B[n][k] = B_gmem[n * kK + k]; } } CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { C[m][n] = C_gmem[m * kN + n]; } } } __syncthreads(); // // Instantiate a warp-level matrix multiply operator given the fundamental instruction shape (8x8x4), // overall shape, data type of each operand, and layout of each operand. 
// using GemmTensorOp = cutlass::gemm::warp::GemmTensorOp< cutlass::gemm::GemmShape<kM, kN, kK>, cutlass::gemm::GemmShape<8, 8, 4>, double, // Data type of A elements cutlass::layout::RowMajor, // Layout of A matrix double, // Data type of B elements cutlass::layout::ColumnMajor, // Layout of B matrix double, // Data type of C elements cutlass::layout::RowMajor, // Layout of C matrix double // Scalar type of alpha and beta >; // Instantiate the GEMM operator GemmTensorOp gemm; // Execute the warp-level GEMM operation gemm( alpha, {&A[0][0], kK}, {&B[0][0], kK}, beta, {&C[0][0], kN}, {&C[0][0], kN}, threadIdx.x); __syncthreads(); // Copy data into SMEM if (threadIdx.x == 0) { CUTLASS_PRAGMA_NO_UNROLL for (int m = 0; m < kM; ++m) { CUTLASS_PRAGMA_NO_UNROLL for (int n = 0; n < kN; ++n) { D_gmem[m * kN + n] = C[m][n]; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to canonical warp-level GEMM operation int main(int argc, const char *arg[]) { bool notSupported = false; // CUTLASS must be compiled with CUDA 11 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "This example requires compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { // Return 0 so tests are considered passing if run on unsupported platforms. 
return 0; } cutlass::HostTensor<double, cutlass::layout::RowMajor> A({kM, kK}); cutlass::HostTensor<double, cutlass::layout::ColumnMajor> B({kK, kN}); cutlass::HostTensor<double, cutlass::layout::RowMajor> C({kM, kN}); cutlass::HostTensor<double, cutlass::layout::RowMajor> D({kM, kN}); uint64_t seed = 2020; double max = 8; double min = -8; cutlass::reference::host::TensorFillRandomUniform( A.host_view(), seed, max, min, 0 ); cutlass::reference::host::TensorFillRandomUniform( B.host_view(), seed + 17, max, min, 0 ); cutlass::reference::host::TensorFillRandomUniform( C.host_view(), seed + 31, max, min, 0 ); A.sync_device(); B.sync_device(); C.sync_device(); D.sync_device(); dim3 grid(1,1); dim3 block(32, 1, 1); double alpha = 2.25; double beta = 1.24; kernel<<< grid, block >>>( D.device_data(), alpha, A.device_data(), B.device_data(), beta, C.device_data() ); cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cerr << "Failed to synchronize device after kernel launch." << std::endl; return -1; } D.sync_host(); // Compute reference on host cutlass::HostTensor<double, cutlass::layout::RowMajor> D_ref({kM, kN}, false); cutlass::reference::host::GemmComplex( {kM, kN, kK}, alpha, A.host_ref(), cutlass::ComplexTransform::kNone, B.host_ref(), cutlass::ComplexTransform::kNone, beta, C.host_ref(), D_ref.host_ref(), double() ); // Verify reference matches computed if (!cutlass::reference::host::TensorEquals( D.host_view(), D_ref.host_view())) { std::cerr << "A =\n" << A.host_view() << "\n\nB = \n" << B.host_view() << "\n\nC = " << C.host_view() << "\n\nRef =\n" << D_ref.host_view() << "\n\nD =\n" << D.host_view() << "\n\n"; std::cerr << "Error - device results mismatch host reference." << std::endl; return -1; } std::cout << "Passed" << std::endl; return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////////
bf0676c2126ba6e14a5647903819cf0ab66cbddd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_array_2norm2_r8__.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t arr_size = XSIZE*YSIZE; const double *arr = NULL; hipMalloc(&arr, XSIZE*YSIZE); double *bnorm2 = NULL; hipMalloc(&bnorm2, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_array_2norm2_r8__), dim3(gridBlock),dim3(threadBlock), 0, 0, arr_size,arr,bnorm2); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_array_2norm2_r8__), dim3(gridBlock),dim3(threadBlock), 0, 0, arr_size,arr,bnorm2); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_array_2norm2_r8__), dim3(gridBlock),dim3(threadBlock), 0, 0, arr_size,arr,bnorm2); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bf0676c2126ba6e14a5647903819cf0ab66cbddd.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_array_2norm2_r8__.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; size_t arr_size = XSIZE*YSIZE; const double *arr = NULL; cudaMalloc(&arr, XSIZE*YSIZE); double *bnorm2 = NULL; cudaMalloc(&bnorm2, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_array_2norm2_r8__<<<gridBlock,threadBlock>>>(arr_size,arr,bnorm2); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_array_2norm2_r8__<<<gridBlock,threadBlock>>>(arr_size,arr,bnorm2); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_array_2norm2_r8__<<<gridBlock,threadBlock>>>(arr_size,arr,bnorm2); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
60f173d8693a2f031bce7ce65a26398938152cbc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from ztranspose_inplace.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #define NB 16 //////////////////////////////////////////////////////////////////////////////// // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See dtranspose_inplace_even for description of threads. __global__ void dtranspose_inplace_odd( int n, double *matrix, int lda ) { __shared__ double sA[ NB ][ NB+1 ]; __shared__ double sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? 
blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; double *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { double *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void dtranspose_inplace_even( int n, double *matrix, int lda ) { __shared__ double sA[ NB ][ NB+1 ]; __shared__ double sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? 
(blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; double *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { double *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /** Purpose ------- dtranspose_inplace_q transposes a square N-by-N matrix in-place. Same as dtranspose_inplace, but adds queue argument. Arguments --------- @param[in] n INTEGER The number of rows & columns of the matrix dA. N >= 0. @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The N-by-N matrix dA. On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_inplace_q( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( ldda < n ) info = -3; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NB, NB ); int nblock = (n + NB - 1)/NB; // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. 
if( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); hipLaunchKernelGGL(( dtranspose_inplace_odd), dim3(grid), dim3(threads), 0, queue , n, dA, ldda ); } else { dim3 grid( nblock+1, nblock/2 ); hipLaunchKernelGGL(( dtranspose_inplace_even), dim3(grid), dim3(threads), 0, queue , n, dA, ldda ); } } /** @see magmablas_dtranspose_inplace_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_inplace( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda ) { magmablas_dtranspose_inplace_q( n, dA, ldda, magma_stream ); }
60f173d8693a2f031bce7ce65a26398938152cbc.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from ztranspose_inplace.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #define NB 16 //////////////////////////////////////////////////////////////////////////////// // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See dtranspose_inplace_even for description of threads. __global__ void dtranspose_inplace_odd( int n, double *matrix, int lda ) { __shared__ double sA[ NB ][ NB+1 ]; __shared__ double sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; double *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { double *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. 
// lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void dtranspose_inplace_even( int n, double *matrix, int lda ) { __shared__ double sA[ NB ][ NB+1 ]; __shared__ double sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? (blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; double *A = matrix + ii+i + (jj+j)*lda; if( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { double *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = *A; } if ( jj+i < n && ii+j < n ) { sB[j][i] = *B; } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /** Purpose ------- dtranspose_inplace_q transposes a square N-by-N matrix in-place. Same as dtranspose_inplace, but adds queue argument. Arguments --------- @param[in] n INTEGER The number of rows & columns of the matrix dA. N >= 0. @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The N-by-N matrix dA. On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N. 
@param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_inplace_q( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( ldda < n ) info = -3; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NB, NB ); int nblock = (n + NB - 1)/NB; // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); dtranspose_inplace_odd<<< grid, threads, 0, queue >>>( n, dA, ldda ); } else { dim3 grid( nblock+1, nblock/2 ); dtranspose_inplace_even<<< grid, threads, 0, queue >>>( n, dA, ldda ); } } /** @see magmablas_dtranspose_inplace_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_inplace( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldda ) { magmablas_dtranspose_inplace_q( n, dA, ldda, magma_stream ); }
e13e226e1ab517df63ea32dc7bb7ca45bcb47c78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define THREAD_PER_BLOCK 2 __global__ void add_matrix(int* a, int* b, int* c,int n) { int col = blockDim.x*blockIdx.x+ threadIdx.x; int row = blockDim.y*blockIdx.y+ threadIdx.y; if ( col<n && row<n ) { c[row*n+col] = a[row*n+col] + b[row*n+col]; } } __global__ void mult_matrix(int* a, int* b, int* c,int n) { int col = blockDim.x*blockIdx.x+ threadIdx.x; int row = blockDim.y*blockIdx.y+ threadIdx.y; if ( col<n && row<n ) { int i; c[row*n+col] = 0; for(i=0;i<n;i++) { c[row*n + col] += a[ row*n + i ]*b[ i*n + col ]; } } } __global__ void mult_matrix_shared(int* a, int* b, int* c,int n) { __shared__ float sub_a[THREAD_PER_BLOCK][THREAD_PER_BLOCK]; __shared__ float sub_b[THREAD_PER_BLOCK][THREAD_PER_BLOCK]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * THREAD_PER_BLOCK + ty; int Col = bx * THREAD_PER_BLOCK + tx; int Pvalue = 0; for (int ph = 0; ph < n/THREAD_PER_BLOCK; ++ph) { sub_a[ty][tx] = a[Row*n + ph*THREAD_PER_BLOCK + tx]; sub_b[ty][tx] = b[(ph*THREAD_PER_BLOCK + ty)*n + Col]; __syncthreads(); for (int k = 0; k < THREAD_PER_BLOCK; ++k) { Pvalue += sub_a[ty][k] * sub_b[k][tx]; } __syncthreads(); } c[Row*n + Col] = Pvalue; } __global__ void mult_mat_rectangular(int *d_M, int *d_N, int *p,int N){ __shared__ int Mds[THREAD_PER_BLOCK][THREAD_PER_BLOCK]; __shared__ int Nds[THREAD_PER_BLOCK][THREAD_PER_BLOCK]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by*THREAD_PER_BLOCK + ty; int Col = bx*2*THREAD_PER_BLOCK + tx; int Col2 = (bx*2 + 1)*THREAD_PER_BLOCK + tx; int p1 = 0; int p2 = 0; int k = 0; int prefM = d_M[Row*N + k*THREAD_PER_BLOCK + tx]; int prefN = d_N[(k*THREAD_PER_BLOCK + ty)*N + Col]; int prefN2 = d_N[(k*THREAD_PER_BLOCK + ty)*N + Col2]; Mds[ty][tx] = prefM; Nds[ty][tx] = prefN; __syncthreads(); for(int m = 0; m < 
N/THREAD_PER_BLOCK ; ++m){ prefM = d_M[Row*N + m*THREAD_PER_BLOCK + tx]; prefN = d_N[(m*THREAD_PER_BLOCK + ty)*N + Col]; for(int k = 0; k < THREAD_PER_BLOCK; k++){ p1 += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); Nds[ty][tx] = prefN2; __syncthreads(); prefN2 = d_N[(m*THREAD_PER_BLOCK + ty)*N + Col2]; for(int k = 0; k < THREAD_PER_BLOCK; k++){ p2 += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); Mds[ty][tx] = prefM; Nds[ty][tx] = prefN; } p[Row*N + Col] = p1; p[Row*N + Col2] = p2; } void print_matrix(int* a,int n) { int i,j; for(i=0;i<n;i++) { for(j=0;j<n;j++) { printf("%d ",a[i*n+j]); } printf("\n"); } } void fill_matrix(int* a,int n) { int i,j; for(i=0;i<n;i++) { for(j=0;j<n;j++) { //a[i*n+j] = rand()%5+1; a[i*n+j] = 1; } } } int main() { int *a,*b,*c; int *d_a,*d_b,*d_c; int mat_elem = 8; int my_size = mat_elem*mat_elem*sizeof(int); //hipEvent_t my_start,my_stop; //hipEventCreate(&my_start); //hipEventCreate(&my_stop); a = (int*) malloc(my_size); b = (int*) malloc(my_size); c = (int*) malloc(my_size); fill_matrix(a,mat_elem); fill_matrix(b,mat_elem); printf("Matrix A\n"); print_matrix(a,mat_elem); printf("Matrix B\n"); print_matrix(b,mat_elem); printf("\n"); hipMalloc((void**)&d_a,my_size); hipMalloc((void**)&d_b,my_size); hipMalloc((void**)&d_c,my_size); hipMemcpy(d_a,a,my_size,hipMemcpyHostToDevice); hipMemcpy(d_b,b,my_size,hipMemcpyHostToDevice); dim3 my_block(THREAD_PER_BLOCK,THREAD_PER_BLOCK); dim3 my_grid((mat_elem + THREAD_PER_BLOCK-1)/my_block.x,(mat_elem + THREAD_PER_BLOCK-1)/my_block.y); //////////////////////ELAPSED TIME /////////////////////////////// //hipEventRecord(my_start,0); //mult_matrix<<<my_grid,my_block>>>(d_a, d_b, d_c,mat_elem); hipLaunchKernelGGL(( mult_mat_rectangular), dim3(my_grid),dim3(my_block), 0, 0, d_a, d_b, d_c,mat_elem); //hipEventRecord(my_stop,0); //hipEventSynchronize(my_stop); ///////////////////////////////////////////////////// //float elapsed_time; //hipEventElapsedTime(&elapsed_time,my_start,my_stop); 
hipMemcpy(c,d_c,my_size,hipMemcpyDeviceToHost); printf("Matrix C\n"); print_matrix(c,mat_elem); //printf("time : %f\n",elapsed_time); return 0; }
e13e226e1ab517df63ea32dc7bb7ca45bcb47c78.cu
#include <stdio.h> #include <stdlib.h> #define THREAD_PER_BLOCK 2 __global__ void add_matrix(int* a, int* b, int* c,int n) { int col = blockDim.x*blockIdx.x+ threadIdx.x; int row = blockDim.y*blockIdx.y+ threadIdx.y; if ( col<n && row<n ) { c[row*n+col] = a[row*n+col] + b[row*n+col]; } } __global__ void mult_matrix(int* a, int* b, int* c,int n) { int col = blockDim.x*blockIdx.x+ threadIdx.x; int row = blockDim.y*blockIdx.y+ threadIdx.y; if ( col<n && row<n ) { int i; c[row*n+col] = 0; for(i=0;i<n;i++) { c[row*n + col] += a[ row*n + i ]*b[ i*n + col ]; } } } __global__ void mult_matrix_shared(int* a, int* b, int* c,int n) { __shared__ float sub_a[THREAD_PER_BLOCK][THREAD_PER_BLOCK]; __shared__ float sub_b[THREAD_PER_BLOCK][THREAD_PER_BLOCK]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * THREAD_PER_BLOCK + ty; int Col = bx * THREAD_PER_BLOCK + tx; int Pvalue = 0; for (int ph = 0; ph < n/THREAD_PER_BLOCK; ++ph) { sub_a[ty][tx] = a[Row*n + ph*THREAD_PER_BLOCK + tx]; sub_b[ty][tx] = b[(ph*THREAD_PER_BLOCK + ty)*n + Col]; __syncthreads(); for (int k = 0; k < THREAD_PER_BLOCK; ++k) { Pvalue += sub_a[ty][k] * sub_b[k][tx]; } __syncthreads(); } c[Row*n + Col] = Pvalue; } __global__ void mult_mat_rectangular(int *d_M, int *d_N, int *p,int N){ __shared__ int Mds[THREAD_PER_BLOCK][THREAD_PER_BLOCK]; __shared__ int Nds[THREAD_PER_BLOCK][THREAD_PER_BLOCK]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by*THREAD_PER_BLOCK + ty; int Col = bx*2*THREAD_PER_BLOCK + tx; int Col2 = (bx*2 + 1)*THREAD_PER_BLOCK + tx; int p1 = 0; int p2 = 0; int k = 0; int prefM = d_M[Row*N + k*THREAD_PER_BLOCK + tx]; int prefN = d_N[(k*THREAD_PER_BLOCK + ty)*N + Col]; int prefN2 = d_N[(k*THREAD_PER_BLOCK + ty)*N + Col2]; Mds[ty][tx] = prefM; Nds[ty][tx] = prefN; __syncthreads(); for(int m = 0; m < N/THREAD_PER_BLOCK ; ++m){ prefM = d_M[Row*N + m*THREAD_PER_BLOCK + tx]; prefN = d_N[(m*THREAD_PER_BLOCK 
+ ty)*N + Col]; for(int k = 0; k < THREAD_PER_BLOCK; k++){ p1 += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); Nds[ty][tx] = prefN2; __syncthreads(); prefN2 = d_N[(m*THREAD_PER_BLOCK + ty)*N + Col2]; for(int k = 0; k < THREAD_PER_BLOCK; k++){ p2 += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); Mds[ty][tx] = prefM; Nds[ty][tx] = prefN; } p[Row*N + Col] = p1; p[Row*N + Col2] = p2; } void print_matrix(int* a,int n) { int i,j; for(i=0;i<n;i++) { for(j=0;j<n;j++) { printf("%d ",a[i*n+j]); } printf("\n"); } } void fill_matrix(int* a,int n) { int i,j; for(i=0;i<n;i++) { for(j=0;j<n;j++) { //a[i*n+j] = rand()%5+1; a[i*n+j] = 1; } } } int main() { int *a,*b,*c; int *d_a,*d_b,*d_c; int mat_elem = 8; int my_size = mat_elem*mat_elem*sizeof(int); //cudaEvent_t my_start,my_stop; //cudaEventCreate(&my_start); //cudaEventCreate(&my_stop); a = (int*) malloc(my_size); b = (int*) malloc(my_size); c = (int*) malloc(my_size); fill_matrix(a,mat_elem); fill_matrix(b,mat_elem); printf("Matrix A\n"); print_matrix(a,mat_elem); printf("Matrix B\n"); print_matrix(b,mat_elem); printf("\n"); cudaMalloc((void**)&d_a,my_size); cudaMalloc((void**)&d_b,my_size); cudaMalloc((void**)&d_c,my_size); cudaMemcpy(d_a,a,my_size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,b,my_size,cudaMemcpyHostToDevice); dim3 my_block(THREAD_PER_BLOCK,THREAD_PER_BLOCK); dim3 my_grid((mat_elem + THREAD_PER_BLOCK-1)/my_block.x,(mat_elem + THREAD_PER_BLOCK-1)/my_block.y); //////////////////////ELAPSED TIME /////////////////////////////// //cudaEventRecord(my_start,0); //mult_matrix<<<my_grid,my_block>>>(d_a, d_b, d_c,mat_elem); mult_mat_rectangular<<<my_grid,my_block>>>(d_a, d_b, d_c,mat_elem); //cudaEventRecord(my_stop,0); //cudaEventSynchronize(my_stop); ///////////////////////////////////////////////////// //float elapsed_time; //cudaEventElapsedTime(&elapsed_time,my_start,my_stop); cudaMemcpy(c,d_c,my_size,cudaMemcpyDeviceToHost); printf("Matrix C\n"); print_matrix(c,mat_elem); //printf("time : %f\n",elapsed_time); return 0; }
38f3514166809e68d0a90298ecf7401f51ee0d4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * pairwise_loss_layer.cu * * Created on: Jan 3, 2017 * Author: Limbo */ #include <algorithm> #include <cfloat> #include <vector> #include "caffe/util/io.hpp" #include "caffe/layers/pairwise_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> void PairwiseLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_ap_.mutable_gpu_data()); caffe_gpu_sub( count, bottom[2]->gpu_data(), bottom[3]->gpu_data(), diff_wn_.mutable_gpu_data()); caffe_gpu_sub( count, bottom[1]->gpu_data(), bottom[2]->gpu_data(), diff_pn_.mutable_gpu_data()); caffe_gpu_powx( count, diff_ap_.mutable_gpu_data(), Dtype(2), diff_sq_ap_.mutable_gpu_data()); caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), diff_sq_ap_.gpu_data(), summer_vec_.gpu_data(), Dtype(0.0), dist_sq_ap_.mutable_gpu_data()); caffe_gpu_powx( count, diff_wn_.mutable_gpu_data(), Dtype(2), diff_sq_wn_.mutable_gpu_data()); caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), diff_sq_wn_.gpu_data(), summer_vec_.gpu_data(), Dtype(0.0), dist_sq_wn_.mutable_gpu_data()); Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype loss(0.0); Dtype loss1(0.0); Dtype loss2(0.0); Dtype Sam(0.0); Dtype unfaml(0.0); const Dtype* sampleW = bottom[4]->gpu_data(); //1111 for (int i = 0; i < bottom[0]->num(); ++i) { loss1 += ::max(Dtype(0.05) - margin +dist_sq_ap_.cpu_data()[i], Dtype(0.0)); Sam +=dist_sq_ap_.cpu_data()[i]; } for (int i = 0; i < bottom[0]->num(); ++i) { unfaml +=dist_sq_wn_.cpu_data()[i]; loss2 += ::max(Dtype(0.05) + margin -dist_sq_wn_.cpu_data()[i], Dtype(0.0)); } loss = loss1 + loss2 ; loss = loss / static_cast<Dtype>(bottom[0]->num()); top[0]->mutable_cpu_data()[0] 
= loss /Dtype(2) ; } template <typename Dtype> __global__ void CLLBackward(const int count, const int channels, const Dtype margin, const Dtype alpha, const Dtype* sampleW, const Dtype* diff, const Dtype* dist_sq_ap_, const Dtype* dist_sq_wn_, Dtype *bottom_diff,const Dtype type) { if (type == 1){ CUDA_KERNEL_LOOP(i, count) { int n = i / channels; Dtype mdist(0.0); mdist = Dtype(0.05) - margin + dist_sq_ap_[n] ; if (mdist > 0.0) { bottom_diff[i] = alpha*diff[i]; } else { bottom_diff[i] = 0; } } } if (type == 0){ CUDA_KERNEL_LOOP(i, count) { int n = i / channels; Dtype mdist(0.0); mdist = Dtype(0.05) + margin - dist_sq_wn_[n]; if (mdist > 0.0) { bottom_diff[i] = alpha*diff[i]; } else { bottom_diff[i] = 0; } } } } template <typename Dtype> void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); for (int i = 0; i < 4; ++i) { if (propagate_down[i]) { const Dtype type = (i<2) ? 1 : 0; const Dtype sign = ((i<=2)&&(i>=1)) ? 
-1 : 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); if(i==0){ // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, margin, alpha, bottom[4]->gpu_data(), diff_ap_.gpu_data(), dist_sq_ap_.gpu_data(), dist_sq_wn_.gpu_data(), bottom[i]->mutable_gpu_diff(), type); CUDA_POST_KERNEL_CHECK; }else if(i==1){ // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, margin, alpha, bottom[4]->gpu_data(), diff_ap_.gpu_data(), dist_sq_ap_.gpu_data(), dist_sq_wn_.gpu_data(), bottom[i]->mutable_gpu_diff(), type); CUDA_POST_KERNEL_CHECK; }else if(i==2){ // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, margin, alpha, bottom[4]->gpu_data(), diff_wn_.gpu_data(), dist_sq_ap_.gpu_data(), dist_sq_wn_.gpu_data(), bottom[i]->mutable_gpu_diff(), type); CUDA_POST_KERNEL_CHECK; } else if(i==3){ // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, channels, margin, alpha, bottom[4]->gpu_data(), diff_wn_.gpu_data(), dist_sq_ap_.gpu_data(), dist_sq_wn_.gpu_data(), bottom[i]->mutable_gpu_diff(), type); CUDA_POST_KERNEL_CHECK; } } } } INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer); } // namespace caffe
38f3514166809e68d0a90298ecf7401f51ee0d4b.cu
/* * pairwise_loss_layer.cu * * Created on: Jan 3, 2017 * Author: Limbo */ #include <algorithm> #include <cfloat> #include <vector> #include "caffe/util/io.hpp" #include "caffe/layers/pairwise_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> void PairwiseLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = bottom[0]->count(); caffe_gpu_sub( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), diff_ap_.mutable_gpu_data()); caffe_gpu_sub( count, bottom[2]->gpu_data(), bottom[3]->gpu_data(), diff_wn_.mutable_gpu_data()); caffe_gpu_sub( count, bottom[1]->gpu_data(), bottom[2]->gpu_data(), diff_pn_.mutable_gpu_data()); caffe_gpu_powx( count, diff_ap_.mutable_gpu_data(), Dtype(2), diff_sq_ap_.mutable_gpu_data()); caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), diff_sq_ap_.gpu_data(), summer_vec_.gpu_data(), Dtype(0.0), dist_sq_ap_.mutable_gpu_data()); caffe_gpu_powx( count, diff_wn_.mutable_gpu_data(), Dtype(2), diff_sq_wn_.mutable_gpu_data()); caffe_gpu_gemv( CblasNoTrans, bottom[0]->num(), bottom[0]->channels(), Dtype(1.0), diff_sq_wn_.gpu_data(), summer_vec_.gpu_data(), Dtype(0.0), dist_sq_wn_.mutable_gpu_data()); Dtype margin = this->layer_param_.triplet_loss_param().margin(); Dtype loss(0.0); Dtype loss1(0.0); Dtype loss2(0.0); Dtype Sam(0.0); Dtype unfaml(0.0); const Dtype* sampleW = bottom[4]->gpu_data(); //1111 for (int i = 0; i < bottom[0]->num(); ++i) { loss1 += std::max(Dtype(0.05) - margin +dist_sq_ap_.cpu_data()[i], Dtype(0.0)); Sam +=dist_sq_ap_.cpu_data()[i]; } for (int i = 0; i < bottom[0]->num(); ++i) { unfaml +=dist_sq_wn_.cpu_data()[i]; loss2 += std::max(Dtype(0.05) + margin -dist_sq_wn_.cpu_data()[i], Dtype(0.0)); } loss = loss1 + loss2 ; loss = loss / static_cast<Dtype>(bottom[0]->num()); top[0]->mutable_cpu_data()[0] = loss /Dtype(2) ; } template <typename Dtype> __global__ void CLLBackward(const 
int count, const int channels, const Dtype margin, const Dtype alpha, const Dtype* sampleW, const Dtype* diff, const Dtype* dist_sq_ap_, const Dtype* dist_sq_wn_, Dtype *bottom_diff,const Dtype type) { if (type == 1){ CUDA_KERNEL_LOOP(i, count) { int n = i / channels; Dtype mdist(0.0); mdist = Dtype(0.05) - margin + dist_sq_ap_[n] ; if (mdist > 0.0) { bottom_diff[i] = alpha*diff[i]; } else { bottom_diff[i] = 0; } } } if (type == 0){ CUDA_KERNEL_LOOP(i, count) { int n = i / channels; Dtype mdist(0.0); mdist = Dtype(0.05) + margin - dist_sq_wn_[n]; if (mdist > 0.0) { bottom_diff[i] = alpha*diff[i]; } else { bottom_diff[i] = 0; } } } } template <typename Dtype> void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { Dtype margin = this->layer_param_.triplet_loss_param().margin(); const int count = bottom[0]->count(); const int channels = bottom[0]->channels(); for (int i = 0; i < 4; ++i) { if (propagate_down[i]) { const Dtype type = (i<2) ? 1 : 0; const Dtype sign = ((i<=2)&&(i>=1)) ? 
-1 : 1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / static_cast<Dtype>(bottom[0]->num()); if(i==0){ // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, margin, alpha, bottom[4]->gpu_data(), diff_ap_.gpu_data(), dist_sq_ap_.gpu_data(), dist_sq_wn_.gpu_data(), bottom[i]->mutable_gpu_diff(), type); CUDA_POST_KERNEL_CHECK; }else if(i==1){ // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, margin, alpha, bottom[4]->gpu_data(), diff_ap_.gpu_data(), dist_sq_ap_.gpu_data(), dist_sq_wn_.gpu_data(), bottom[i]->mutable_gpu_diff(), type); CUDA_POST_KERNEL_CHECK; }else if(i==2){ // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, margin, alpha, bottom[4]->gpu_data(), diff_wn_.gpu_data(), dist_sq_ap_.gpu_data(), dist_sq_wn_.gpu_data(), bottom[i]->mutable_gpu_diff(), type); CUDA_POST_KERNEL_CHECK; } else if(i==3){ // NOLINT_NEXT_LINE(whitespace/operators) CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, channels, margin, alpha, bottom[4]->gpu_data(), diff_wn_.gpu_data(), dist_sq_ap_.gpu_data(), dist_sq_wn_.gpu_data(), bottom[i]->mutable_gpu_diff(), type); CUDA_POST_KERNEL_CHECK; } } } } INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer); } // namespace caffe
51c80eeec08b9e222ae9bd07f1d99fd2b28fd650.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <cstdio> #include <algorithm> #include <ctime> using namespace std; using namespace cv; __global__ void blue(int *mat, int height, int width) { int i = blockIdx.x * 32 + threadIdx.x , j = blockIdx.y * 32 + threadIdx.y; if (i < height && j < width) { int &mi = mat[i * width + j]; mi = mi * 2 + 5; } } void Video(const char **argv) { // Setup video capture device // Link it to the first capture device hipError_t err; VideoCapture captureVideo; captureVideo.open(argv[1]); int i, j; Mat frameFromVideo; double cnt = 0; int *hmat, *dmat = NULL; while (true){ captureVideo >> frameFromVideo; if (frameFromVideo.empty()) break; // imshow("origin", frameFromVideo); // for (int k = 0; k < 3; ++k) { int rows = frameFromVideo.rows, cols = frameFromVideo.cols; int size = rows * cols * sizeof(int); hmat = (int *)malloc(size); for (i = 0; i < rows; i++) for (j = 0; j < cols; j++) hmat[i * cols + j] = frameFromVideo.at<Vec3b>(i, j)[0]; double last = getTickCount(); dmat = NULL; hipMalloc(&dmat, size); hipMemcpy(dmat, hmat, size, hipMemcpyHostToDevice); dim3 blk(32, 32); dim3 grid(rows / blk.x, cols / blk.y); blue << <grid, blk >> >(dmat, rows, cols); cnt += getTickCount() - last; err = hipMemcpy(hmat, dmat, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed while Memcpying back! 
%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // getting max value int max_val = 0; for (i = 0; i < rows; i++) for (j = 0; j < cols; j++) { if (hmat[i * cols + j] > max_val) max_val = hmat[i * cols + j]; } // normalizing for (i = 0; i < rows; i++) for (j = 0; j < cols; j++) frameFromVideo.at<Vec3b>(i, j)[0] = hmat[i * cols + j] * 255 / max_val; hipFree(dmat); free(hmat); cnt += getTickCount() - last; // } // imshow("outputCamera", frameFromVideo); if (waitKey(30) >= 0) break; } printf("Total = %fms\n", 1.0*cnt / (getTickFrequency() / 1000.0)); } int main(int argc, const char** argv){ if (CV_MAJOR_VERSION < 3) { puts("Advise you update to OpenCV3"); } Video(argv); return 0; }
51c80eeec08b9e222ae9bd07f1d99fd2b28fd650.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <cstdio> #include <algorithm> #include <ctime> using namespace std; using namespace cv; __global__ void blue(int *mat, int height, int width) { int i = blockIdx.x * 32 + threadIdx.x , j = blockIdx.y * 32 + threadIdx.y; if (i < height && j < width) { int &mi = mat[i * width + j]; mi = mi * 2 + 5; } } void Video(const char **argv) { // Setup video capture device // Link it to the first capture device cudaError_t err; VideoCapture captureVideo; captureVideo.open(argv[1]); int i, j; Mat frameFromVideo; double cnt = 0; int *hmat, *dmat = NULL; while (true){ captureVideo >> frameFromVideo; if (frameFromVideo.empty()) break; // imshow("origin", frameFromVideo); // for (int k = 0; k < 3; ++k) { int rows = frameFromVideo.rows, cols = frameFromVideo.cols; int size = rows * cols * sizeof(int); hmat = (int *)malloc(size); for (i = 0; i < rows; i++) for (j = 0; j < cols; j++) hmat[i * cols + j] = frameFromVideo.at<Vec3b>(i, j)[0]; double last = getTickCount(); dmat = NULL; cudaMalloc(&dmat, size); cudaMemcpy(dmat, hmat, size, cudaMemcpyHostToDevice); dim3 blk(32, 32); dim3 grid(rows / blk.x, cols / blk.y); blue << <grid, blk >> >(dmat, rows, cols); cnt += getTickCount() - last; err = cudaMemcpy(hmat, dmat, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed while Memcpying back! 
%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // getting max value int max_val = 0; for (i = 0; i < rows; i++) for (j = 0; j < cols; j++) { if (hmat[i * cols + j] > max_val) max_val = hmat[i * cols + j]; } // normalizing for (i = 0; i < rows; i++) for (j = 0; j < cols; j++) frameFromVideo.at<Vec3b>(i, j)[0] = hmat[i * cols + j] * 255 / max_val; cudaFree(dmat); free(hmat); cnt += getTickCount() - last; // } // imshow("outputCamera", frameFromVideo); if (waitKey(30) >= 0) break; } printf("Total = %fms\n", 1.0*cnt / (getTickFrequency() / 1000.0)); } int main(int argc, const char** argv){ if (CV_MAJOR_VERSION < 3) { puts("Advise you update to OpenCV3"); } Video(argv); return 0; }
069b95c10e0bd9197c2f52e94a9adc92bfc79f3c.hip
// !!! This is a file automatically generated by hipify!!! #define NBDKIT_API_VERSION 2 #include <nbdkit-plugin.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <iostream> #define THREAD_MODEL NBDKIT_THREAD_MODEL_SERIALIZE_CONNECTIONS int64_t gpuGAME_SIZE; char *gpuGAME_PTR; static void * gpuGAME_open (int readonly){ /* create a handle ... */ return NBDKIT_HANDLE_NOT_NEEDED; } static int64_t gpuGAME_get_size (void *handle){ return gpuGAME_SIZE; } static int gpuGAME_config (const char *key, const char *value) { if ( strcmp(key, "size") == 0 ) { gpuGAME_SIZE = nbdkit_parse_size(value); //Should check if able to allocate memory if ( hipMalloc (&gpuGAME_PTR, gpuGAME_SIZE ) != hipSuccess ) { std::cerr << hipGetLastError() << std::endl; abort(); } } else { return -1; } return 0; } static int gpuGAME_pread (void *handle, void *buf, uint32_t count, uint64_t offset, uint32_t flags) { assert(hipMemcpy(buf, (gpuGAME_PTR + offset), count, hipMemcpyDeviceToHost) == hipSuccess); return 0; } static int gpuGAME_pwrite (void *handle, const void *buf, uint32_t count, uint64_t offset, uint32_t flags) { assert( hipMemcpy((gpuGAME_PTR + offset), buf, count, hipMemcpyHostToDevice) == hipSuccess); return 0; } static struct nbdkit_plugin plugin = { .name = "gpuGAME", .config = gpuGAME_config, .open = gpuGAME_open, .get_size = gpuGAME_get_size, .pread = gpuGAME_pread, .pwrite = gpuGAME_pwrite, /* etc */ }; NBDKIT_REGISTER_PLUGIN(plugin)
069b95c10e0bd9197c2f52e94a9adc92bfc79f3c.cu
#define NBDKIT_API_VERSION 2 #include <nbdkit-plugin.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <iostream> #define THREAD_MODEL NBDKIT_THREAD_MODEL_SERIALIZE_CONNECTIONS int64_t gpuGAME_SIZE; char *gpuGAME_PTR; static void * gpuGAME_open (int readonly){ /* create a handle ... */ return NBDKIT_HANDLE_NOT_NEEDED; } static int64_t gpuGAME_get_size (void *handle){ return gpuGAME_SIZE; } static int gpuGAME_config (const char *key, const char *value) { if ( strcmp(key, "size") == 0 ) { gpuGAME_SIZE = nbdkit_parse_size(value); //Should check if able to allocate memory if ( cudaMalloc (&gpuGAME_PTR, gpuGAME_SIZE ) != cudaSuccess ) { std::cerr << cudaGetLastError() << std::endl; abort(); } } else { return -1; } return 0; } static int gpuGAME_pread (void *handle, void *buf, uint32_t count, uint64_t offset, uint32_t flags) { assert(cudaMemcpy(buf, (gpuGAME_PTR + offset), count, cudaMemcpyDeviceToHost) == cudaSuccess); return 0; } static int gpuGAME_pwrite (void *handle, const void *buf, uint32_t count, uint64_t offset, uint32_t flags) { assert( cudaMemcpy((gpuGAME_PTR + offset), buf, count, cudaMemcpyHostToDevice) == cudaSuccess); return 0; } static struct nbdkit_plugin plugin = { .name = "gpuGAME", .config = gpuGAME_config, .open = gpuGAME_open, .get_size = gpuGAME_get_size, .pread = gpuGAME_pread, .pwrite = gpuGAME_pwrite, /* etc */ }; NBDKIT_REGISTER_PLUGIN(plugin)
30f28b7b283be1e456f7640724a72d9fe443b681.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
30f28b7b283be1e456f7640724a72d9fe443b681.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<4>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::conv::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, 2, 4, 16, true, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
609d01dd9dc1c99b02cb62c46cec60d592109025.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cassert> #include <chrono> #include <cstdio> #include <cstdlib> #include <numeric> #include <sstream> #include <vector> #include <numa.h> #include <roctracer/roctx.h> #include <unistd.h> #include "common/cuda_check.hpp" #include "common/common.hpp" template <typename data_type> __global__ void gpu_write(data_type *ptr, const size_t stride, const size_t count) { const size_t gx = blockIdx.x * blockDim.x + threadIdx.x; const size_t wx = gx >> 5; // warp id const size_t lx = threadIdx.x & 0x1F; // lane id const size_t warpsInGrid = gridDim.x * blockDim.x / 32; const size_t dataInStride = stride / sizeof(data_type); const size_t dataInCount = count / sizeof(data_type); for (size_t i = wx * dataInStride; i < dataInCount; i += warpsInGrid * dataInStride) { for (size_t strideOff = lx; strideOff < dataInStride && (i + strideOff < dataInCount); strideOff += 32) { ptr[i + strideOff] = i; } } } static void gpu_gpu_bw(const Device &dst, const Device &src, const size_t count) { assert(src.is_gpu() && dst.is_gpu()); int *dstPtr; RT_CHECK(hipSetDevice(src.id())); { hipError_t err = hipDeviceEnablePeerAccess(dst.id(), 0); if (err != hipErrorPeerAccessAlreadyEnabled) { RT_CHECK(err); } } RT_CHECK(hipSetDevice(dst.id())); RT_CHECK(hipMalloc(&dstPtr, count)); { hipError_t err = hipDeviceEnablePeerAccess(src.id(), 0); if (err != hipErrorPeerAccessAlreadyEnabled) { RT_CHECK(err); } } // fill up GPU with blocks const size_t numMps = num_mps(dst); const size_t maxBlocksPerMp = max_blocks_per_mp(dst); const size_t maxThreadsPerMp = max_threads_per_mp(dst); dim3 gridDim(numMps * maxBlocksPerMp); dim3 blockDim(maxThreadsPerMp / maxBlocksPerMp); const long pageSize = sysconf(_SC_PAGESIZE); std::vector<double> times; const size_t numIters = 20; for (size_t i = 0; i < numIters; ++i) { RT_CHECK(hipSetDevice(src.id())); RT_CHECK(hipDeviceSynchronize()); roctxRangePush("dst"); 
auto start = std::chrono::high_resolution_clock::now(); hipLaunchKernelGGL(( gpu_write), dim3(gridDim), dim3(blockDim), 0, 0, dstPtr, pageSize, count); RT_CHECK(hipDeviceSynchronize()); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> txSeconds = end - start; roctxRangePop(); times.push_back(txSeconds.count()); } const double minTime = *std::min_element(times.begin(), times.end()); const double avgTime = std::accumulate(times.begin(), times.end(), 0.0) / times.size(); printf(",%.2f", count / 1024.0 / 1024.0 / minTime); RT_CHECK(hipFree(dstPtr)); } int main(void) { const size_t numNodes = numa_max_node(); std::vector<Device> gpus = get_gpus(); if (gpus.size() < 2) { std::cerr << "Not enough GPUs\n"; return 1; } // print header printf("Transfer Size (MB)"); for (const auto dst : gpus) { for (const auto src : gpus) { if (src != dst) { int can; RT_CHECK(hipDeviceCanAccessPeer(&can, src.id(), dst.id())); if (can) { printf(",%s:%s", src.name().c_str(), dst.name().c_str()); } } } } printf("\n"); auto freeMem = gpu_free_memory(gpus); auto counts = Sequence::geometric(2048, freeMem, 2) | Sequence::geometric(2048 * 1.5, freeMem, 2); for (auto count : counts) { printf("%f", count / 1024.0 / 1024.0); for (const auto dst : gpus) { for (const auto src : gpus) { if (src != dst) { int can; RT_CHECK(hipDeviceCanAccessPeer(&can, src.id(), dst.id())); if (can) { gpu_gpu_bw(dst, src, count); } } } } printf("\n"); } return 0; }
609d01dd9dc1c99b02cb62c46cec60d592109025.cu
#include <algorithm> #include <cassert> #include <chrono> #include <cstdio> #include <cstdlib> #include <numeric> #include <sstream> #include <vector> #include <numa.h> #include <nvToolsExt.h> #include <unistd.h> #include "common/cuda_check.hpp" #include "common/common.hpp" template <typename data_type> __global__ void gpu_write(data_type *ptr, const size_t stride, const size_t count) { const size_t gx = blockIdx.x * blockDim.x + threadIdx.x; const size_t wx = gx >> 5; // warp id const size_t lx = threadIdx.x & 0x1F; // lane id const size_t warpsInGrid = gridDim.x * blockDim.x / 32; const size_t dataInStride = stride / sizeof(data_type); const size_t dataInCount = count / sizeof(data_type); for (size_t i = wx * dataInStride; i < dataInCount; i += warpsInGrid * dataInStride) { for (size_t strideOff = lx; strideOff < dataInStride && (i + strideOff < dataInCount); strideOff += 32) { ptr[i + strideOff] = i; } } } static void gpu_gpu_bw(const Device &dst, const Device &src, const size_t count) { assert(src.is_gpu() && dst.is_gpu()); int *dstPtr; RT_CHECK(cudaSetDevice(src.id())); { cudaError_t err = cudaDeviceEnablePeerAccess(dst.id(), 0); if (err != cudaErrorPeerAccessAlreadyEnabled) { RT_CHECK(err); } } RT_CHECK(cudaSetDevice(dst.id())); RT_CHECK(cudaMalloc(&dstPtr, count)); { cudaError_t err = cudaDeviceEnablePeerAccess(src.id(), 0); if (err != cudaErrorPeerAccessAlreadyEnabled) { RT_CHECK(err); } } // fill up GPU with blocks const size_t numMps = num_mps(dst); const size_t maxBlocksPerMp = max_blocks_per_mp(dst); const size_t maxThreadsPerMp = max_threads_per_mp(dst); dim3 gridDim(numMps * maxBlocksPerMp); dim3 blockDim(maxThreadsPerMp / maxBlocksPerMp); const long pageSize = sysconf(_SC_PAGESIZE); std::vector<double> times; const size_t numIters = 20; for (size_t i = 0; i < numIters; ++i) { RT_CHECK(cudaSetDevice(src.id())); RT_CHECK(cudaDeviceSynchronize()); nvtxRangePush("dst"); auto start = std::chrono::high_resolution_clock::now(); gpu_write<<<gridDim, 
blockDim>>>(dstPtr, pageSize, count); RT_CHECK(cudaDeviceSynchronize()); auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> txSeconds = end - start; nvtxRangePop(); times.push_back(txSeconds.count()); } const double minTime = *std::min_element(times.begin(), times.end()); const double avgTime = std::accumulate(times.begin(), times.end(), 0.0) / times.size(); printf(",%.2f", count / 1024.0 / 1024.0 / minTime); RT_CHECK(cudaFree(dstPtr)); } int main(void) { const size_t numNodes = numa_max_node(); std::vector<Device> gpus = get_gpus(); if (gpus.size() < 2) { std::cerr << "Not enough GPUs\n"; return 1; } // print header printf("Transfer Size (MB)"); for (const auto dst : gpus) { for (const auto src : gpus) { if (src != dst) { int can; RT_CHECK(cudaDeviceCanAccessPeer(&can, src.id(), dst.id())); if (can) { printf(",%s:%s", src.name().c_str(), dst.name().c_str()); } } } } printf("\n"); auto freeMem = gpu_free_memory(gpus); auto counts = Sequence::geometric(2048, freeMem, 2) | Sequence::geometric(2048 * 1.5, freeMem, 2); for (auto count : counts) { printf("%f", count / 1024.0 / 1024.0); for (const auto dst : gpus) { for (const auto src : gpus) { if (src != dst) { int can; RT_CHECK(cudaDeviceCanAccessPeer(&can, src.id(), dst.id())); if (can) { gpu_gpu_bw(dst, src, count); } } } } printf("\n"); } return 0; }
786a350b380a4790c65839bf69ce268a9b36cc46.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "w1_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *grads_W1 = NULL; hipMalloc(&grads_W1, XSIZE*YSIZE); double *W1 = NULL; hipMalloc(&W1, XSIZE*YSIZE); double learning_rate = 1; int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( w1_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grads_W1,W1,learning_rate,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( w1_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grads_W1,W1,learning_rate,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( w1_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, grads_W1,W1,learning_rate,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout 
<<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
786a350b380a4790c65839bf69ce268a9b36cc46.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "w1_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *grads_W1 = NULL; cudaMalloc(&grads_W1, XSIZE*YSIZE); double *W1 = NULL; cudaMalloc(&W1, XSIZE*YSIZE); double learning_rate = 1; int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); w1_kernel<<<gridBlock,threadBlock>>>(grads_W1,W1,learning_rate,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { w1_kernel<<<gridBlock,threadBlock>>>(grads_W1,W1,learning_rate,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { w1_kernel<<<gridBlock,threadBlock>>>(grads_W1,W1,learning_rate,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
5e7b58aa6076b1eec9dae140ae68b1032bee2265.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #ifdef OCTOTIGER_HAVE_CUDA #endif __global__ void kernel_reconstruct(double *Q, double *D1, double *U_, double *X, double omega) { bool first_thread = (blockIdx.x == 0) && (threadIdx.y == 0) && (threadIdx.z == 0); if (first_thread) printf("Hello reconstruct"); }
5e7b58aa6076b1eec9dae140ae68b1032bee2265.cu
#include "includes.h" #ifdef OCTOTIGER_HAVE_CUDA #endif __global__ void kernel_reconstruct(double *Q, double *D1, double *U_, double *X, double omega) { bool first_thread = (blockIdx.x == 0) && (threadIdx.y == 0) && (threadIdx.z == 0); if (first_thread) printf("Hello reconstruct"); }
10ba78aa6ea2a8c8f3c38d80f223979539cc24a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #include <helper_cuda.h> #include <cstdio> //#include <time.h> #define TPB 3200 #define M 100 //Number of times to do the data transfer __device__ float distance(float x1, float x2) { return sqrt((x2 - x1)*(x2 - x1)); } __global__ void distanceKernel(float *d_out, float *d_in, float ref) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const float x = d_in[i]; d_out[i] = distance(x, ref); printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]); } void distanceArray(float *out, float *in, float ref, int len) { //Create event variables for timing. hipEvent_t startMemcpy, stopMemcpy; hipEvent_t startKernel, stopKernel; hipEventCreate(&startMemcpy); hipEventCreate(&stopMemcpy); hipEventCreate(&startKernel); hipEventCreate(&stopKernel); float *d_in = 0; float *d_out = 0; checkCudaErrors(hipMalloc(&d_in, len * sizeof(float))); checkCudaErrors(hipMalloc(&d_out, len * sizeof(float))); // Record the event that "start the clock" on data transfer hipEventRecord(startMemcpy); //clock_t memcpyBegin = clock(); //Copy input data from host to device M times for (int i = 0; i < M; i++) { checkCudaErrors(hipMemcpy(d_in, in, len * sizeof(float), hipMemcpyHostToDevice)); } //Record the event that "stop the clock" on data transfer hipEventRecord(stopMemcpy); //clock_t memcpyEnd = clock(); //Record the event that "start the clock" on kernel execution hipEventRecord(startKernel); //clock_t kernelBegin = clock(); hipLaunchKernelGGL(( distanceKernel), dim3(len/TPB), dim3(TPB) , 0, 0, d_out, d_in, ref); //Record the event that "stop the clock" on kernel execution checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); hipEventRecord(stopKernel); //clock_t kernelEnd = clock(); checkCudaErrors(hipMemcpy(out, d_out, len * sizeof(float), hipMemcpyDeviceToHost)); ////Compute time in seconds between clock count readings //double memcpyTime = 
((double)(memcpyEnd - memcpyBegin)) / CLOCKS_PER_SEC; //double kernelTime = ((double)(kernelEnd - kernelBegin)) / CLOCKS_PER_SEC; //Ensure timed events have stopped. hipEventSynchronize(stopMemcpy); hipEventSynchronize(stopKernel); //Cnovert event records to time and output. float memcpyTimeInMs = 0; hipEventElapsedTime(&memcpyTimeInMs, startMemcpy, stopMemcpy); float kernelTimeInMs = 0; hipEventElapsedTime(&kernelTimeInMs, startKernel, stopKernel); printf("Kernel time (ms): %f\n", kernelTimeInMs); printf("Data transfer time (ms):%f\n", memcpyTimeInMs); checkCudaErrors(hipFree(d_in)); checkCudaErrors(hipFree(d_out)); }
10ba78aa6ea2a8c8f3c38d80f223979539cc24a7.cu
#include "kernel.h" #include <helper_cuda.h> #include <cstdio> //#include <time.h> #define TPB 3200 #define M 100 //Number of times to do the data transfer __device__ float distance(float x1, float x2) { return sqrt((x2 - x1)*(x2 - x1)); } __global__ void distanceKernel(float *d_out, float *d_in, float ref) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const float x = d_in[i]; d_out[i] = distance(x, ref); printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]); } void distanceArray(float *out, float *in, float ref, int len) { //Create event variables for timing. cudaEvent_t startMemcpy, stopMemcpy; cudaEvent_t startKernel, stopKernel; cudaEventCreate(&startMemcpy); cudaEventCreate(&stopMemcpy); cudaEventCreate(&startKernel); cudaEventCreate(&stopKernel); float *d_in = 0; float *d_out = 0; checkCudaErrors(cudaMalloc(&d_in, len * sizeof(float))); checkCudaErrors(cudaMalloc(&d_out, len * sizeof(float))); // Record the event that "start the clock" on data transfer cudaEventRecord(startMemcpy); //clock_t memcpyBegin = clock(); //Copy input data from host to device M times for (int i = 0; i < M; i++) { checkCudaErrors(cudaMemcpy(d_in, in, len * sizeof(float), cudaMemcpyHostToDevice)); } //Record the event that "stop the clock" on data transfer cudaEventRecord(stopMemcpy); //clock_t memcpyEnd = clock(); //Record the event that "start the clock" on kernel execution cudaEventRecord(startKernel); //clock_t kernelBegin = clock(); distanceKernel<<< len/TPB, TPB >>> (d_out, d_in, ref); //Record the event that "stop the clock" on kernel execution checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); cudaEventRecord(stopKernel); //clock_t kernelEnd = clock(); checkCudaErrors(cudaMemcpy(out, d_out, len * sizeof(float), cudaMemcpyDeviceToHost)); ////Compute time in seconds between clock count readings //double memcpyTime = ((double)(memcpyEnd - memcpyBegin)) / CLOCKS_PER_SEC; //double kernelTime = ((double)(kernelEnd - kernelBegin)) 
/ CLOCKS_PER_SEC; //Ensure timed events have stopped. cudaEventSynchronize(stopMemcpy); cudaEventSynchronize(stopKernel); //Cnovert event records to time and output. float memcpyTimeInMs = 0; cudaEventElapsedTime(&memcpyTimeInMs, startMemcpy, stopMemcpy); float kernelTimeInMs = 0; cudaEventElapsedTime(&kernelTimeInMs, startKernel, stopKernel); printf("Kernel time (ms): %f\n", kernelTimeInMs); printf("Data transfer time (ms):%f\n", memcpyTimeInMs); checkCudaErrors(cudaFree(d_in)); checkCudaErrors(cudaFree(d_out)); }
fb2a31820985f6cca3e9acfd937eef74e7b1313c.hip
// !!! This is a file automatically generated by hipify!!! /* * ===================================================================================== * * Filename: thrust_sort_policy.cpp * * Description: Implementaion of thrust sort on gpu. * * Version: 1.0 * Created: 2016-06-15 12:42 * Revision: none * Compiler: nvcc * * Author: Michael Tierney (MT), tiernemi@tcd.ie * * ===================================================================================== */ #include <vector> #include <iostream> // Thrust. // #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/unique.h> #include <thrust/device_ptr.h> #include <thrust/tuple.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/extrema.h> // Custom Headers // #include "../../inc/cpp_inc/thrust_sort_policy.hpp" #include "../../inc/cpp_inc/strided_iterators.hpp" typedef thrust::tuple<float,float,float> Tuple3f; typedef thrust::device_vector<float>::iterator DevVecIteratorf ; typedef thrust::device_vector<int>::iterator DevVecIteratori ; typedef thrust::tuple<thrust::device_vector<float>::iterator,thrust::device_vector<float>::iterator, thrust::device_vector<float>::iterator> TupleIt ; typedef thrust::zip_iterator<TupleIt> ZipIteratorTuple ; // This functor implements the dot product between 3d vectors struct calcDistance : public thrust::binary_function<Tuple3f,Tuple3f,float> { __host__ __device__ float operator()(const Tuple3f & a, const Tuple3f & b) const { float diff1 = thrust::get<0>(a) - thrust::get<0>(b) ; float diff2 = thrust::get<1>(a) - thrust::get<1>(b) ; float diff3 = thrust::get<2>(a) - thrust::get<2>(b) ; return diff1*diff1 + diff2*diff2 + diff3*diff3 ; } } ; /* * === MEMBER FUNCTION : ThrustSort ================================================== * Name: allocate * Arguments: const std::vector<Centroid> & 
centroids - Centroid data to allocate. * Returns: Pair of pointers to centroid position data and ids. * Description: Allocates position and id data on the GPU. * ===================================================================================== */ std::pair<float*,int*> ThrustSort::allocate(const std::vector<Centroid> & centroids) { std::pair<float*,int*> ptrs ; // Pre process triangle co-ordinates. // std::vector<float> cenCo(3*centroids.size()) ; std::vector<int> cenIds(centroids.size()) ; for (unsigned int i = 0 ; i < centroids.size() ; ++i) { cenIds[i] = centroids[i].getID() ; const float * coords = centroids[i].getCoords() ; cenCo[i] = coords[0] ; cenCo[i+centroids.size()] = coords[1] ; cenCo[i+2*centroids.size()] = coords[2] ; } hipMalloc((void**) &ptrs.first, sizeof(float)*cenCo.size()) ; hipMalloc((void**) &ptrs.second, sizeof(int)*cenIds.size()) ; hipMemcpy(ptrs.first, cenCo.data(), sizeof(float)*cenCo.size(), hipMemcpyHostToDevice) ; hipMemcpy(ptrs.second, cenIds.data(), sizeof(int)*cenIds.size(), hipMemcpyHostToDevice) ; return ptrs ; } /* * === MEMBER FUNCTION : ThrustSort ================================================== * Name: sort * Arguments: const Camera & camera, - Camera to sort relative to. * std::vector<int> & centroidIDsVec, - Array to write ids to. * int * centroidIDs - Array of centroid ids (GPU). * float * centroidPos - Array of centroid positions (GPU). * Description: Transforms centorid positions to distances and sorts these keys * and ids (values) using thrust sort by key. * ===================================================================================== */ void ThrustSort::sort(const Camera & camera, std::vector<int> & centroidIDsVec, int * centroidIDs, float * centroidPos) { const int numCentroids = centroidIDsVec.size() ; // Get the pointers of the x, y, z data for the triangles. 
// thrust::device_ptr<float> cenXPtrBegin = thrust::device_pointer_cast(centroidPos) ; thrust::device_ptr<float> cenYPtrBegin = cenXPtrBegin + numCentroids ; thrust::device_ptr<float> cenZPtrBegin = cenYPtrBegin + numCentroids ; thrust::device_ptr<float> cenXPtrEnd = cenXPtrBegin + numCentroids ; thrust::device_ptr<float> cenYPtrEnd = cenYPtrBegin + numCentroids ; thrust::device_ptr<float> cenZPtrEnd = cenZPtrBegin + numCentroids ; // Pre process camera co-ordinates. // std::vector<float> camCo(3) ; const float * coords = camera.getCoords() ; camCo[0] = coords[0] ; camCo[1] = coords[1] ; camCo[2] = coords[2] ; // Initialise device vectors. //` thrust::device_vector<float> devCamCo(camCo.begin(),camCo.end()) ; thrust::device_vector<float> devDists(numCentroids) ; thrust::device_ptr<int> devIDs = thrust::device_malloc<int>(numCentroids); thrust::copy(centroidIDs,centroidIDs+numCentroids,devIDs) ; thrust::device_ptr<float> camXPtrBegin = devCamCo.data() ; thrust::device_ptr<float> camYPtrBegin = devCamCo.data()+1 ; thrust::device_ptr<float> camZPtrBegin = devCamCo.data()+2 ; thrust::device_ptr<float> camXPtrEnd = devCamCo.data() + 1 ; thrust::device_ptr<float> camYPtrEnd = devCamCo.data() + 2 ; thrust::device_ptr<float> camZPtrEnd = devCamCo.data() + 3 ; // Zip the x...y...z vector into tuples of x,y,z // ZipIteratorTuple zipTriBegin = zip(cenXPtrBegin, cenYPtrBegin, cenZPtrBegin) ; ZipIteratorTuple zipTriEnd = zip(cenXPtrEnd, cenYPtrEnd, cenZPtrEnd); ZipIteratorTuple zipCamBegin = zip(camXPtrBegin, camYPtrBegin, camZPtrBegin) ; ZipIteratorTuple zipCamEnd = zip(camXPtrEnd, camYPtrEnd, camZPtrEnd); // Get the device pointers for the device cenangle ids and device distance vector. // thrust::device_ptr<float> devKeyPtr = devDists.data() ; thrust::device_ptr<int> devValPtr = devIDs ; // For each camera get distance and sort ids. 
// thrust::constant_iterator<Tuple3f> cam(*(zipCamBegin)) ; thrust::permutation_iterator<ZipIteratorTuple,DevVecIteratori> permIter(zipTriBegin,devValPtr) ; thrust::transform(thrust::device, permIter, permIter+numCentroids, cam, devDists.begin(), calcDistance()); thrust::sort_by_key(thrust::device, devKeyPtr,devKeyPtr+numCentroids,devValPtr) ; // GPU copy back to CPU. // thrust::copy(devValPtr,devValPtr+numCentroids,centroidIDsVec.begin()) ; thrust::device_free(devIDs) ; } /* * === MEMBER FUNCTION : ThrustSort ================================================== * Name: benchSort * Arguments: const Camera & camera, - Camera to sort relative to. * std::vector<int> & centroidIDsVec, - Array to write ids to. * int * centroidIDs - Array of centroid ids (GPU). * float * centroidPos - Array of centroid positions (GPU). * std::vector<float> & times - Vector used to store timings. * Description: Transforms centorid positions to distances and sorts these keys * and ids (values) using thrust sort by key. This version benchmarks * aswell. * ===================================================================================== */ void ThrustSort::benchSort(const Camera & camera, std::vector<int> & centroidIDsVec, int * centroidIDs, float * centroidPos, std::vector<float> & times) { hipEvent_t start, stop ; hipEventCreate(&start) ; hipEventCreate(&stop) ; const int numCentroids = centroidIDsVec.size() ; // Get the pointers of the x, y, z data for the triangles. // thrust::device_ptr<float> cenXPtrBegin = thrust::device_pointer_cast(centroidPos) ; thrust::device_ptr<float> cenYPtrBegin = cenXPtrBegin + numCentroids ; thrust::device_ptr<float> cenZPtrBegin = cenYPtrBegin + numCentroids ; thrust::device_ptr<float> cenXPtrEnd = cenXPtrBegin + numCentroids ; thrust::device_ptr<float> cenYPtrEnd = cenYPtrBegin + numCentroids ; thrust::device_ptr<float> cenZPtrEnd = cenZPtrBegin + numCentroids ; // Pre process camera co-ordinates. 
// std::vector<float> camCo(3) ; const float * coords = camera.getCoords() ; camCo[0] = coords[0] ; camCo[1] = coords[1] ; camCo[2] = coords[2] ; // Initialise device vectors. //` thrust::device_vector<float> devCamCo(camCo.begin(),camCo.end()) ; thrust::device_vector<float> devDists(numCentroids) ; thrust::device_ptr<int> devIDs = thrust::device_malloc<int>(numCentroids); thrust::copy(centroidIDs,centroidIDs+numCentroids,devIDs) ; thrust::device_ptr<float> camXPtrBegin = devCamCo.data() ; thrust::device_ptr<float> camYPtrBegin = devCamCo.data()+1 ; thrust::device_ptr<float> camZPtrBegin = devCamCo.data()+2 ; thrust::device_ptr<float> camXPtrEnd = devCamCo.data() + 1 ; thrust::device_ptr<float> camYPtrEnd = devCamCo.data() + 2 ; thrust::device_ptr<float> camZPtrEnd = devCamCo.data() + 3 ; // Zip the x...y...z vector into tuples of x,y,z // ZipIteratorTuple zipTriBegin = zip(cenXPtrBegin, cenYPtrBegin, cenZPtrBegin) ; ZipIteratorTuple zipTriEnd = zip(cenXPtrEnd, cenYPtrEnd, cenZPtrEnd); ZipIteratorTuple zipCamBegin = zip(camXPtrBegin, camYPtrBegin, camZPtrBegin) ; ZipIteratorTuple zipCamEnd = zip(camXPtrEnd, camYPtrEnd, camZPtrEnd); // Get the device pointers for the device cenangle ids and device distance vector. // thrust::device_ptr<float> devKeyPtr = devDists.data() ; thrust::device_ptr<int> devValPtr = devIDs ; // For each camera get distance and sort ids. 
// hipEventRecord(start, 0) ; thrust::constant_iterator<Tuple3f> cam(*(zipCamBegin)) ; thrust::permutation_iterator<ZipIteratorTuple,DevVecIteratori> permIter(zipTriBegin,devValPtr) ; thrust::transform(thrust::device, permIter, permIter+numCentroids, cam, devDists.begin(), calcDistance()); hipEventRecord(stop, 0) ; hipEventSynchronize(stop) ; float transformTime ; hipEventElapsedTime(&transformTime , start, stop) ; hipEventRecord(start, 0) ; thrust::sort_by_key(thrust::device, devKeyPtr,devKeyPtr+numCentroids,devValPtr) ; hipEventRecord(stop, 0) ; hipEventSynchronize(stop) ; float sortTime ; hipEventElapsedTime(&sortTime , start, stop) ; // GPU copy back to CPU. // hipEventRecord(start, 0) ; thrust::copy(devValPtr,devValPtr+numCentroids,centroidIDsVec.begin()) ; thrust::device_free(devIDs) ; hipEventRecord(stop, 0) ; hipEventSynchronize(stop) ; float copyTime ; hipEventElapsedTime(&copyTime , start, stop) ; times.push_back(sortTime/1E3) ; times.push_back((sortTime+transformTime)/1E3) ; times.push_back((sortTime+transformTime+copyTime)/1E3) ; } /* * === MEMBER FUNCTION : ThrustSort ================================================== * Name: deAllocate * Arguments: float * centroidPos - Centroid position location. * int * centroidIDs - Centroid ids location. * Description: Frees data sotred at pointers. * ===================================================================================== */ void ThrustSort::deAllocate(float * centroidPos, int * centroidIDs) { hipFree(centroidPos) ; hipFree(centroidIDs) ; }
fb2a31820985f6cca3e9acfd937eef74e7b1313c.cu
/* * ===================================================================================== * * Filename: thrust_sort_policy.cpp * * Description: Implementaion of thrust sort on gpu. * * Version: 1.0 * Created: 2016-06-15 12:42 * Revision: none * Compiler: nvcc * * Author: Michael Tierney (MT), tiernemi@tcd.ie * * ===================================================================================== */ #include <vector> #include <iostream> // Thrust. // #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/unique.h> #include <thrust/device_ptr.h> #include <thrust/tuple.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/permutation_iterator.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/extrema.h> // Custom Headers // #include "../../inc/cpp_inc/thrust_sort_policy.hpp" #include "../../inc/cpp_inc/strided_iterators.hpp" typedef thrust::tuple<float,float,float> Tuple3f; typedef thrust::device_vector<float>::iterator DevVecIteratorf ; typedef thrust::device_vector<int>::iterator DevVecIteratori ; typedef thrust::tuple<thrust::device_vector<float>::iterator,thrust::device_vector<float>::iterator, thrust::device_vector<float>::iterator> TupleIt ; typedef thrust::zip_iterator<TupleIt> ZipIteratorTuple ; // This functor implements the dot product between 3d vectors struct calcDistance : public thrust::binary_function<Tuple3f,Tuple3f,float> { __host__ __device__ float operator()(const Tuple3f & a, const Tuple3f & b) const { float diff1 = thrust::get<0>(a) - thrust::get<0>(b) ; float diff2 = thrust::get<1>(a) - thrust::get<1>(b) ; float diff3 = thrust::get<2>(a) - thrust::get<2>(b) ; return diff1*diff1 + diff2*diff2 + diff3*diff3 ; } } ; /* * === MEMBER FUNCTION : ThrustSort ================================================== * Name: allocate * Arguments: const std::vector<Centroid> & centroids - Centroid data to allocate. 
* Returns: Pair of pointers to centroid position data and ids. * Description: Allocates position and id data on the GPU. * ===================================================================================== */ std::pair<float*,int*> ThrustSort::allocate(const std::vector<Centroid> & centroids) { std::pair<float*,int*> ptrs ; // Pre process triangle co-ordinates. // std::vector<float> cenCo(3*centroids.size()) ; std::vector<int> cenIds(centroids.size()) ; for (unsigned int i = 0 ; i < centroids.size() ; ++i) { cenIds[i] = centroids[i].getID() ; const float * coords = centroids[i].getCoords() ; cenCo[i] = coords[0] ; cenCo[i+centroids.size()] = coords[1] ; cenCo[i+2*centroids.size()] = coords[2] ; } cudaMalloc((void**) &ptrs.first, sizeof(float)*cenCo.size()) ; cudaMalloc((void**) &ptrs.second, sizeof(int)*cenIds.size()) ; cudaMemcpy(ptrs.first, cenCo.data(), sizeof(float)*cenCo.size(), cudaMemcpyHostToDevice) ; cudaMemcpy(ptrs.second, cenIds.data(), sizeof(int)*cenIds.size(), cudaMemcpyHostToDevice) ; return ptrs ; } /* * === MEMBER FUNCTION : ThrustSort ================================================== * Name: sort * Arguments: const Camera & camera, - Camera to sort relative to. * std::vector<int> & centroidIDsVec, - Array to write ids to. * int * centroidIDs - Array of centroid ids (GPU). * float * centroidPos - Array of centroid positions (GPU). * Description: Transforms centorid positions to distances and sorts these keys * and ids (values) using thrust sort by key. * ===================================================================================== */ void ThrustSort::sort(const Camera & camera, std::vector<int> & centroidIDsVec, int * centroidIDs, float * centroidPos) { const int numCentroids = centroidIDsVec.size() ; // Get the pointers of the x, y, z data for the triangles. 
// thrust::device_ptr<float> cenXPtrBegin = thrust::device_pointer_cast(centroidPos) ; thrust::device_ptr<float> cenYPtrBegin = cenXPtrBegin + numCentroids ; thrust::device_ptr<float> cenZPtrBegin = cenYPtrBegin + numCentroids ; thrust::device_ptr<float> cenXPtrEnd = cenXPtrBegin + numCentroids ; thrust::device_ptr<float> cenYPtrEnd = cenYPtrBegin + numCentroids ; thrust::device_ptr<float> cenZPtrEnd = cenZPtrBegin + numCentroids ; // Pre process camera co-ordinates. // std::vector<float> camCo(3) ; const float * coords = camera.getCoords() ; camCo[0] = coords[0] ; camCo[1] = coords[1] ; camCo[2] = coords[2] ; // Initialise device vectors. //` thrust::device_vector<float> devCamCo(camCo.begin(),camCo.end()) ; thrust::device_vector<float> devDists(numCentroids) ; thrust::device_ptr<int> devIDs = thrust::device_malloc<int>(numCentroids); thrust::copy(centroidIDs,centroidIDs+numCentroids,devIDs) ; thrust::device_ptr<float> camXPtrBegin = devCamCo.data() ; thrust::device_ptr<float> camYPtrBegin = devCamCo.data()+1 ; thrust::device_ptr<float> camZPtrBegin = devCamCo.data()+2 ; thrust::device_ptr<float> camXPtrEnd = devCamCo.data() + 1 ; thrust::device_ptr<float> camYPtrEnd = devCamCo.data() + 2 ; thrust::device_ptr<float> camZPtrEnd = devCamCo.data() + 3 ; // Zip the x...y...z vector into tuples of x,y,z // ZipIteratorTuple zipTriBegin = zip(cenXPtrBegin, cenYPtrBegin, cenZPtrBegin) ; ZipIteratorTuple zipTriEnd = zip(cenXPtrEnd, cenYPtrEnd, cenZPtrEnd); ZipIteratorTuple zipCamBegin = zip(camXPtrBegin, camYPtrBegin, camZPtrBegin) ; ZipIteratorTuple zipCamEnd = zip(camXPtrEnd, camYPtrEnd, camZPtrEnd); // Get the device pointers for the device cenangle ids and device distance vector. // thrust::device_ptr<float> devKeyPtr = devDists.data() ; thrust::device_ptr<int> devValPtr = devIDs ; // For each camera get distance and sort ids. 
// thrust::constant_iterator<Tuple3f> cam(*(zipCamBegin)) ; thrust::permutation_iterator<ZipIteratorTuple,DevVecIteratori> permIter(zipTriBegin,devValPtr) ; thrust::transform(thrust::device, permIter, permIter+numCentroids, cam, devDists.begin(), calcDistance()); thrust::sort_by_key(thrust::device, devKeyPtr,devKeyPtr+numCentroids,devValPtr) ; // GPU copy back to CPU. // thrust::copy(devValPtr,devValPtr+numCentroids,centroidIDsVec.begin()) ; thrust::device_free(devIDs) ; } /* * === MEMBER FUNCTION : ThrustSort ================================================== * Name: benchSort * Arguments: const Camera & camera, - Camera to sort relative to. * std::vector<int> & centroidIDsVec, - Array to write ids to. * int * centroidIDs - Array of centroid ids (GPU). * float * centroidPos - Array of centroid positions (GPU). * std::vector<float> & times - Vector used to store timings. * Description: Transforms centorid positions to distances and sorts these keys * and ids (values) using thrust sort by key. This version benchmarks * aswell. * ===================================================================================== */ void ThrustSort::benchSort(const Camera & camera, std::vector<int> & centroidIDsVec, int * centroidIDs, float * centroidPos, std::vector<float> & times) { cudaEvent_t start, stop ; cudaEventCreate(&start) ; cudaEventCreate(&stop) ; const int numCentroids = centroidIDsVec.size() ; // Get the pointers of the x, y, z data for the triangles. // thrust::device_ptr<float> cenXPtrBegin = thrust::device_pointer_cast(centroidPos) ; thrust::device_ptr<float> cenYPtrBegin = cenXPtrBegin + numCentroids ; thrust::device_ptr<float> cenZPtrBegin = cenYPtrBegin + numCentroids ; thrust::device_ptr<float> cenXPtrEnd = cenXPtrBegin + numCentroids ; thrust::device_ptr<float> cenYPtrEnd = cenYPtrBegin + numCentroids ; thrust::device_ptr<float> cenZPtrEnd = cenZPtrBegin + numCentroids ; // Pre process camera co-ordinates. 
// std::vector<float> camCo(3) ; const float * coords = camera.getCoords() ; camCo[0] = coords[0] ; camCo[1] = coords[1] ; camCo[2] = coords[2] ; // Initialise device vectors. //` thrust::device_vector<float> devCamCo(camCo.begin(),camCo.end()) ; thrust::device_vector<float> devDists(numCentroids) ; thrust::device_ptr<int> devIDs = thrust::device_malloc<int>(numCentroids); thrust::copy(centroidIDs,centroidIDs+numCentroids,devIDs) ; thrust::device_ptr<float> camXPtrBegin = devCamCo.data() ; thrust::device_ptr<float> camYPtrBegin = devCamCo.data()+1 ; thrust::device_ptr<float> camZPtrBegin = devCamCo.data()+2 ; thrust::device_ptr<float> camXPtrEnd = devCamCo.data() + 1 ; thrust::device_ptr<float> camYPtrEnd = devCamCo.data() + 2 ; thrust::device_ptr<float> camZPtrEnd = devCamCo.data() + 3 ; // Zip the x...y...z vector into tuples of x,y,z // ZipIteratorTuple zipTriBegin = zip(cenXPtrBegin, cenYPtrBegin, cenZPtrBegin) ; ZipIteratorTuple zipTriEnd = zip(cenXPtrEnd, cenYPtrEnd, cenZPtrEnd); ZipIteratorTuple zipCamBegin = zip(camXPtrBegin, camYPtrBegin, camZPtrBegin) ; ZipIteratorTuple zipCamEnd = zip(camXPtrEnd, camYPtrEnd, camZPtrEnd); // Get the device pointers for the device cenangle ids and device distance vector. // thrust::device_ptr<float> devKeyPtr = devDists.data() ; thrust::device_ptr<int> devValPtr = devIDs ; // For each camera get distance and sort ids. 
// cudaEventRecord(start, 0) ; thrust::constant_iterator<Tuple3f> cam(*(zipCamBegin)) ; thrust::permutation_iterator<ZipIteratorTuple,DevVecIteratori> permIter(zipTriBegin,devValPtr) ; thrust::transform(thrust::device, permIter, permIter+numCentroids, cam, devDists.begin(), calcDistance()); cudaEventRecord(stop, 0) ; cudaEventSynchronize(stop) ; float transformTime ; cudaEventElapsedTime(&transformTime , start, stop) ; cudaEventRecord(start, 0) ; thrust::sort_by_key(thrust::device, devKeyPtr,devKeyPtr+numCentroids,devValPtr) ; cudaEventRecord(stop, 0) ; cudaEventSynchronize(stop) ; float sortTime ; cudaEventElapsedTime(&sortTime , start, stop) ; // GPU copy back to CPU. // cudaEventRecord(start, 0) ; thrust::copy(devValPtr,devValPtr+numCentroids,centroidIDsVec.begin()) ; thrust::device_free(devIDs) ; cudaEventRecord(stop, 0) ; cudaEventSynchronize(stop) ; float copyTime ; cudaEventElapsedTime(&copyTime , start, stop) ; times.push_back(sortTime/1E3) ; times.push_back((sortTime+transformTime)/1E3) ; times.push_back((sortTime+transformTime+copyTime)/1E3) ; } /* * === MEMBER FUNCTION : ThrustSort ================================================== * Name: deAllocate * Arguments: float * centroidPos - Centroid position location. * int * centroidIDs - Centroid ids location. * Description: Frees data sotred at pointers. * ===================================================================================== */ void ThrustSort::deAllocate(float * centroidPos, int * centroidIDs) { cudaFree(centroidPos) ; cudaFree(centroidIDs) ; }
d758615e0cef4959c909f42ff0c73d389ac4c3d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width; i += numThreads) { tgtMat[width*i + i] = mat[width*i + i] * vec[i]; } }
d758615e0cef4959c909f42ff0c73d389ac4c3d7.cu
#include "includes.h" __global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width; i += numThreads) { tgtMat[width*i + i] = mat[width*i + i] * vec[i]; } }
8acc7377374f5968804fcec7ad086e2de58a346f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // -------------------------------------------------------- // r-fcn align // Written by hw, 2018. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/psroi_align_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void PSROIRoIAlignForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const Dtype* bottom_rois, const int output_dim, const int group_size, Dtype* top_data, int* mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const Dtype* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; Dtype roi_start_w = offset_bottom_rois[1] * spatial_scale; Dtype roi_start_h = offset_bottom_rois[2] * spatial_scale; Dtype roi_end_w = offset_bottom_rois[3] * spatial_scale; Dtype roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, (Dtype)1.); Dtype roi_height = max(roi_end_h - roi_start_h, (Dtype)1.); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; const Dtype* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = 
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const Dtype count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 Dtype output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const Dtype y = roi_start_h + ph * bin_size_h + static_cast<Dtype>(iy + .5f) * bin_size_h / static_cast<Dtype>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const Dtype x = roi_start_w + pw * bin_size_w + static_cast<Dtype>(ix + .5f) * bin_size_w / static_cast<Dtype>(roi_bin_grid_w); Dtype val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; mapping_channel[index] = c; } } template <typename Dtype> void PSROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, -1, mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) PSROIRoIAlignForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, sampling_ratio_, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void PSROIAlignBackwardAtomic( const int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, const int sampling_ratio, Dtype* 
bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const Dtype* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical Dtype roi_start_w = offset_bottom_rois[1] * spatial_scale; Dtype roi_start_h = offset_bottom_rois[2] * spatial_scale; Dtype roi_end_w = offset_bottom_rois[3] * spatial_scale; Dtype roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, (Dtype)1.); Dtype roi_height = max(roi_end_h - roi_start_h, (Dtype)1.); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; const Dtype top_diff_this_bin = top_diff[index]; //int top_offset = (n * channels + c) * pooled_height * pooled_width; //const Dtype* offset_top_diff = top_diff + top_offset; //const Dtype top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // Compute c at bottom const Dtype count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const Dtype y = roi_start_h + ph * bin_size_h + static_cast<Dtype>(iy + .5f) * bin_size_h / static_cast<Dtype>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const Dtype x = roi_start_w + pw * bin_size_w + static_cast<Dtype>(ix + .5f) * bin_size_w / static_cast<Dtype>(roi_bin_grid_w); Dtype w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); Dtype g1 = top_diff_this_bin * w1 / count; Dtype g2 = top_diff_this_bin * w2 / count; Dtype g3 = top_diff_this_bin * w3 / count; Dtype g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { caffe_gpu_atomic_add( static_cast<Dtype>(g1), offset_bottom_diff + y_low * width + x_low); caffe_gpu_atomic_add( static_cast<Dtype>(g2), offset_bottom_diff + y_low * width + x_high); caffe_gpu_atomic_add( static_cast<Dtype>(g3), offset_bottom_diff + y_high * width + x_low); caffe_gpu_atomic_add( static_cast<Dtype>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } } template <typename Dtype> void PSROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const int* mapping_channel_ptr = mapping_channel_.gpu_data(); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) PSROIAlignBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr, top[0]->num(), 
spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, sampling_ratio_,bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PSROIAlignLayer); } // namespace caffe
8acc7377374f5968804fcec7ad086e2de58a346f.cu
// -------------------------------------------------------- // r-fcn align // Written by hw, 2018. // -------------------------------------------------------- #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/psroi_align_layer.hpp" #include "caffe/util/gpu_util.cuh" using std::max; using std::min; namespace caffe { template <typename Dtype> __global__ void PSROIRoIAlignForward( const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const Dtype* bottom_rois, const int output_dim, const int group_size, Dtype* top_data, int* mapping_channel) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const Dtype* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; Dtype roi_start_w = offset_bottom_rois[1] * spatial_scale; Dtype roi_start_h = offset_bottom_rois[2] * spatial_scale; Dtype roi_end_w = offset_bottom_rois[3] * spatial_scale; Dtype roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, (Dtype)1.); Dtype roi_height = max(roi_end_h - roi_start_h, (Dtype)1.); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int gw = pw; int gh = ph; int c = (ctop*group_size + gh)*group_size + gw; const Dtype* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const Dtype count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 Dtype output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const Dtype y = roi_start_h + ph * bin_size_h + static_cast<Dtype>(iy + .5f) * bin_size_h / static_cast<Dtype>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const Dtype x = roi_start_w + pw * bin_size_w + static_cast<Dtype>(ix + .5f) * bin_size_w / static_cast<Dtype>(roi_bin_grid_w); Dtype val = bilinear_interpolate( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; mapping_channel[index] = c; } } template <typename Dtype> void PSROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* mapping_channel_ptr = mapping_channel_.mutable_gpu_data(); int count = top[0]->count(); caffe_gpu_set(count, Dtype(0), top_data); caffe_gpu_set(count, -1, mapping_channel_ptr); // NOLINT_NEXT_LINE(whitespace/operators) PSROIRoIAlignForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> > (count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, sampling_ratio_, bottom_rois, output_dim_, group_size_, top_data, mapping_channel_ptr); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void PSROIAlignBackwardAtomic( const int nthreads, const Dtype* top_diff, const int* mapping_channel, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, const int sampling_ratio, Dtype* bottom_diff, const Dtype* 
bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const Dtype* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical Dtype roi_start_w = offset_bottom_rois[1] * spatial_scale; Dtype roi_start_h = offset_bottom_rois[2] * spatial_scale; Dtype roi_end_w = offset_bottom_rois[3] * spatial_scale; Dtype roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force too small ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, (Dtype)1.); Dtype roi_height = max(roi_end_h - roi_start_h, (Dtype)1.); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); int c = mapping_channel[index]; Dtype* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; const Dtype top_diff_this_bin = top_diff[index]; //int top_offset = (n * channels + c) * pooled_height * pooled_width; //const Dtype* offset_top_diff = top_diff + top_offset; //const Dtype top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // Compute c at bottom const Dtype count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const Dtype y = roi_start_h + ph * bin_size_h + static_cast<Dtype>(iy + .5f) * bin_size_h / static_cast<Dtype>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const Dtype x = roi_start_w + pw * bin_size_w + static_cast<Dtype>(ix + .5f) * bin_size_w / static_cast<Dtype>(roi_bin_grid_w); Dtype w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); Dtype g1 = top_diff_this_bin * w1 / count; Dtype g2 = top_diff_this_bin * w2 / count; Dtype g3 = top_diff_this_bin * w3 / count; Dtype g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { caffe_gpu_atomic_add( static_cast<Dtype>(g1), offset_bottom_diff + y_low * width + x_low); caffe_gpu_atomic_add( static_cast<Dtype>(g2), offset_bottom_diff + y_low * width + x_high); caffe_gpu_atomic_add( static_cast<Dtype>(g3), offset_bottom_diff + y_high * width + x_low); caffe_gpu_atomic_add( static_cast<Dtype>(g4), offset_bottom_diff + y_high * width + x_high); } // if } // ix } // iy } } template <typename Dtype> void PSROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_count = bottom[0]->count(); const int* mapping_channel_ptr = mapping_channel_.gpu_data(); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); caffe_gpu_set(bottom_count, Dtype(0), bottom_diff); const int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) PSROIAlignBackwardAtomic<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(count, top_diff, mapping_channel_ptr, top[0]->num(), 
spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, output_dim_, sampling_ratio_,bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PSROIAlignLayer); } // namespace caffe
76743c1d7c8f0c93bcbde36c205bb21ccb43d4e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <float.h> #include <math.h> #include <thrust/tuple.h> #include <torch/extension.h> #include <cstdio> #include <tuple> #include "float_math.cuh" #include "geometry_utils.cuh" #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" namespace { // A structure for holding details about a pixel. struct Pixel { float z; int64_t idx; float dist; float3 bary; }; __device__ bool operator<(const Pixel& a, const Pixel& b) { return a.z < b.z; } __device__ float FloatMin3(const float p1, const float p2, const float p3) { return fminf(p1, fminf(p2, p3)); } __device__ float FloatMax3(const float p1, const float p2, const float p3) { return fmaxf(p1, fmaxf(p2, p3)); } // Get the xyz coordinates of the three vertices for the face given by the // index face_idx into face_verts. __device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts( const float* face_verts, int face_idx) { const float x0 = face_verts[face_idx * 9 + 0]; const float y0 = face_verts[face_idx * 9 + 1]; const float z0 = face_verts[face_idx * 9 + 2]; const float x1 = face_verts[face_idx * 9 + 3]; const float y1 = face_verts[face_idx * 9 + 4]; const float z1 = face_verts[face_idx * 9 + 5]; const float x2 = face_verts[face_idx * 9 + 6]; const float y2 = face_verts[face_idx * 9 + 7]; const float z2 = face_verts[face_idx * 9 + 8]; const float3 v0xyz = make_float3(x0, y0, z0); const float3 v1xyz = make_float3(x1, y1, z1); const float3 v2xyz = make_float3(x2, y2, z2); return thrust::make_tuple(v0xyz, v1xyz, v2xyz); } // Get the min/max x/y/z values for the face given by vertices v0, v1, v2. 
__device__ thrust::tuple<float2, float2, float2> GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { const float xmin = FloatMin3(v0.x, v1.x, v2.x); const float ymin = FloatMin3(v0.y, v1.y, v2.y); const float zmin = FloatMin3(v0.z, v1.z, v2.z); const float xmax = FloatMax3(v0.x, v1.x, v2.x); const float ymax = FloatMax3(v0.y, v1.y, v2.y); const float zmax = FloatMax3(v0.z, v1.z, v2.z); return thrust::make_tuple( make_float2(xmin, xmax), make_float2(ymin, ymax), make_float2(zmin, zmax)); } // Check if the point (px, py) lies outside the face bounding box face_bbox. // Return true if the point is outside. __device__ bool CheckPointOutsideBoundingBox( float3 v0, float3 v1, float3 v2, float blur_radius, float2 pxy) { const auto bbox = GetFaceBoundingBox(v0, v1, v2); const float2 xlims = thrust::get<0>(bbox); const float2 ylims = thrust::get<1>(bbox); const float2 zlims = thrust::get<2>(bbox); const float x_min = xlims.x - blur_radius; const float y_min = ylims.x - blur_radius; const float x_max = xlims.y + blur_radius; const float y_max = ylims.y + blur_radius; // Check if the current point is oustside the triangle bounding box. return (pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min); } // This function checks if a pixel given by xy location pxy lies within the // face with index face_idx in face_verts. One of the inputs is a list (q) // which contains Pixel structs with the indices of the faces which intersect // with this pixel sorted by closest z distance. If the point pxy lies in the // face, the list (q) is updated and re-orderered in place. In addition // the auxillary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizeMeshesNaiveCudaKernel and // RasterizeMeshesFineCudaKernel. 
template <typename FaceQ> __device__ void CheckPixelInsideFace( const float* face_verts, // (F, 3, 3) const int face_idx, int& q_size, float& q_max_z, int& q_max_idx, FaceQ& q, const float blur_radius, const float2 pxy, // Coordinates of the pixel const int K, const bool perspective_correct) { const auto v012 = GetSingleFaceVerts(face_verts, face_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only need xy for barycentric coordinates and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Perform checks and skip if: // 1. the face is behind the camera // 2. the face has very small face area // 3. the pixel is outside the face bbox const float zmax = FloatMax3(v0.z, v1.z, v2.z); const bool outside_bbox = CheckPointOutsideBoundingBox( v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); const bool zero_face_area = (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon); if (zmax < 0 || outside_bbox || zero_face_area) { return; } // Calculate barycentric coords and euclidean dist to triangle. const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 p_bary = !perspective_correct ? p_bary0 : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); const float pz = p_bary.x * v0.z + p_bary.y * v1.z + p_bary.z * v2.z; if (pz < 0) { return; // Face is behind the image plane. } // Get abs squared distance const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); // Use the bary coordinates to determine if the point is inside the face. const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; const float signed_dist = inside ? 
-dist : dist; // Check if pixel is outside blur region if (!inside && dist >= blur_radius) { return; } if (q_size < K) { // Just insert it. q[q_size] = {pz, face_idx, signed_dist, p_bary}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max. q[q_max_idx] = {pz, face_idx, signed_dist, p_bary}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesNaiveCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const bool perspective_correct, const int N, const int H, const int W, const int K, int64_t* face_idxs, float* zbuf, float* pix_dists, float* bary) { // Simple version: One thread per output pixel int num_threads = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // batch index. const int pix_idx = i % (H * W); // Reverse ordering of X and Y axes const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordiantes of pixel. const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). 
We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the faces. const int64_t face_start_idx = mesh_to_face_first_idx[n]; const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; // Loop through the faces in the mesh. for (int f = face_start_idx; f < face_stop_idx; ++f) { // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct); } // TODO: make sorting an option as only top k is needed, not sorted values. 
BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { face_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist; bary[(idx + k) * 3 + 0] = q[k].bary.x; bary[(idx + k) * 3 + 1] = q[k].bary.y; bary[(idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> RasterizeMeshesNaiveCuda( const torch::Tensor& face_verts, const torch::Tensor& mesh_to_faces_packed_first_idx, const torch::Tensor& num_faces_per_mesh, const int image_size, const float blur_radius, const int num_closest, const bool perspective_correct) { if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || face_verts.size(2) != 3) { AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); } if (num_faces_per_mesh.size(0) != mesh_to_faces_packed_first_idx.size(0)) { AT_ERROR( "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); } if (num_closest > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } const int N = num_faces_per_mesh.size(0); // batch size. const int H = image_size; // Assume square images. 
const int W = image_size; const int K = num_closest; auto long_opts = face_verts.options().dtype(torch::kInt64); auto float_opts = face_verts.options().dtype(torch::kFloat32); torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts); torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts); torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts); torch::Tensor bary = torch::full({N, H, W, K, 3}, -1, float_opts); const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesNaiveCudaKernel), dim3(blocks), dim3(threads), 0, 0, face_verts.contiguous().data<float>(), mesh_to_faces_packed_first_idx.contiguous().data<int64_t>(), num_faces_per_mesh.contiguous().data<int64_t>(), blur_radius, perspective_correct, N, H, W, K, face_idxs.contiguous().data<int64_t>(), zbuf.contiguous().data<float>(), pix_dists.contiguous().data<float>(), bary.contiguous().data<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO: benchmark parallelizing over faces_verts instead of over pixels. __global__ void RasterizeMeshesBackwardCudaKernel( const float* face_verts, // (F, 3, 3) const int64_t* pix_to_face, // (N, H, W, K) const bool perspective_correct, const int N, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_bary, // (N, H, W, K, 3) const float* grad_dists, // (N, H, W, K) float* grad_face_verts) { // (F, 3, 3) // Parallelize over each pixel in images of // size H * W, for each image in the batch of size N. const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { // Convert linear index to 3D index const int n = t_i / (H * W); // batch index. 
const int pix_idx = t_i % (H * W); // Reverse ordering of X and Y axes. const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // Loop over all the faces for this pixel. for (int k = 0; k < K; k++) { // Index into (N, H, W, K, :) grad tensors // pixel index + top k index int i = n * H * W * K + pix_idx * K + k; const int f = pix_to_face[i]; if (f < 0) { continue; // padded face. } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only neex xy for barycentric coordinate and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Get upstream gradients for the face. const float grad_dist_upstream = grad_dists[i]; const float grad_zbuf_upstream = grad_zbuf[i]; const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; const float3 grad_bary_upstream = make_float3( grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); const float3 bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 bary = !perspective_correct ? bary0 : BarycentricPerspectiveCorrectionForward(bary0, v0.z, v1.z, v2.z); const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f; const float sign = inside ? -1.0f : 1.0f; // TODO(T52813608) Add support for non-square images. 
auto grad_dist_f = PointTriangleDistanceBackward( pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); // Upstream gradient for barycentric coords from zbuf calculation: // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 // Therefore // d_zbuf/d_bary_w0 = z0 // d_zbuf/d_bary_w1 = z1 // d_zbuf/d_bary_w2 = z2 const float3 d_zbuf_d_bary = make_float3(v0.z, v1.z, v2.z); // Total upstream barycentric gradients are the sum of // external upstream gradients and contribution from zbuf. const float3 grad_bary_f_sum = (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bary); float3 grad_bary0 = grad_bary_f_sum; float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; if (perspective_correct) { auto perspective_grads = BarycentricPerspectiveCorrectionBackward( bary0, v0.z, v1.z, v2.z, grad_bary_f_sum); grad_bary0 = thrust::get<0>(perspective_grads); dz0_persp = thrust::get<1>(perspective_grads); dz1_persp = thrust::get<2>(perspective_grads); dz2_persp = thrust::get<3>(perspective_grads); } auto grad_bary_f = BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); atomicAdd( grad_face_verts + f * 9 + 2, grad_zbuf_upstream * bary.x + dz0_persp); atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); atomicAdd( grad_face_verts + f * 9 + 5, grad_zbuf_upstream * bary.y + dz1_persp); atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); atomicAdd( 
grad_face_verts + f * 9 + 8, grad_zbuf_upstream * bary.z + dz2_persp); } } } torch::Tensor RasterizeMeshesBackwardCuda( const torch::Tensor& face_verts, // (F, 3, 3) const torch::Tensor& pix_to_face, // (N, H, W, K) const torch::Tensor& grad_zbuf, // (N, H, W, K) const torch::Tensor& grad_bary, // (N, H, W, K, 3) const torch::Tensor& grad_dists, // (N, H, W, K) const bool perspective_correct) { const int F = face_verts.size(0); const int N = pix_to_face.size(0); const int H = pix_to_face.size(1); const int W = pix_to_face.size(2); const int K = pix_to_face.size(3); torch::Tensor grad_face_verts = torch::zeros({F, 3, 3}, face_verts.options()); const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesBackwardCudaKernel), dim3(blocks), dim3(threads), 0, 0, face_verts.contiguous().data<float>(), pix_to_face.contiguous().data<int64_t>(), perspective_correct, N, H, W, K, grad_zbuf.contiguous().data<float>(), grad_bary.contiguous().data<float>(), grad_dists.contiguous().data<float>(), grad_face_verts.contiguous().data<float>()); return grad_face_verts; } // **************************************************************************** // * COARSE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesCoarseCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const int N, const int F, const int H, const int W, const int bin_size, const int chunk_size, const int max_faces_per_bin, int* faces_per_bin, int* bin_faces) { extern __shared__ char sbuf[]; const int M = max_faces_per_bin; const int num_bins = 1 + (W - 1) / bin_size; // Integer divide round up const float half_pix = 1.0f / W; // Size of half a pixel in NDC units // This is a boolean array of shape (num_bins, num_bins, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the 
image. BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size); // Have each block handle a chunk of faces const int chunks_per_batch = 1 + (F - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; // batch index const int chunk_idx = chunk % chunks_per_batch; const int face_start_idx = chunk_idx * chunk_size; binmask.block_clear(); const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx]; const int64_t mesh_face_stop_idx = mesh_face_start_idx + num_faces_per_mesh[batch_idx]; // Have each thread handle a different face within the chunk for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) { const int f_idx = face_start_idx + f; // Check if face index corresponds to the mesh in the batch given by // batch_idx if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) { continue; } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Compute screen-space bbox for the triangle expanded by blur. float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius); float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius); float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius); float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius); float zmax = FloatMax3(v0.z, v1.z, v2.z); if (zmax < 0) { continue; // Face is behind the camera. } // Brute-force search over all bins; TODO(T54294966) something smarter. for (int by = 0; by < num_bins; ++by) { // Y coordinate of the top and bottom of the bin. // PixToNdc gives the location of the center of each pixel, so we // need to add/subtract a half pixel to get the true extent of the bin. // Reverse ordering of Y axis so that +Y is upwards in the image. 
const int yidx = num_bins - by; const float bin_y_max = PixToNdc(yidx * bin_size - 1, H) + half_pix; const float bin_y_min = PixToNdc((yidx - 1) * bin_size, H) - half_pix; const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax); for (int bx = 0; bx < num_bins; ++bx) { // X coordinate of the left and right of the bin. // Reverse ordering of x axis so that +X is left. const int xidx = num_bins - bx; const float bin_x_max = PixToNdc(xidx * bin_size - 1, W) + half_pix; const float bin_x_min = PixToNdc((xidx - 1) * bin_size, W) - half_pix; const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax); if (y_overlap && x_overlap) { binmask.set(by, bx, f); } } } } __syncthreads(); // Now we have processed every face in the current chunk. We need to // count the number of faces in each bin so we can write the indices // out to global memory. We have each thread handle a different bin. for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) { const int by = byx / num_bins; const int bx = byx % num_bins; const int count = binmask.count(by, bx); const int faces_per_bin_idx = batch_idx * num_bins * num_bins + by * num_bins + bx; // This atomically increments the (global) number of faces found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_faces array for the // faces in the current chunk that fall into this bin. const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_faces. int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M + bx * M + start; for (int f = 0; f < chunk_size; ++f) { if (binmask.get(by, bx, f)) { // TODO(T54296346) find the correct method for handling errors in // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin. 
// Either decrease bin size or increase max_faces_per_bin bin_faces[next_idx] = face_start_idx + f; next_idx++; } } } __syncthreads(); } } torch::Tensor RasterizeMeshesCoarseCuda( const torch::Tensor& face_verts, const torch::Tensor& mesh_to_face_first_idx, const torch::Tensor& num_faces_per_mesh, const int image_size, const float blur_radius, const int bin_size, const int max_faces_per_bin) { if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || face_verts.size(2) != 3) { AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); } const int W = image_size; const int H = image_size; const int F = face_verts.size(0); const int N = num_faces_per_mesh.size(0); const int num_bins = 1 + (image_size - 1) / bin_size; // Divide round up. const int M = max_faces_per_bin; if (num_bins >= 22) { std::stringstream ss; ss << "Got " << num_bins << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = face_verts.options().dtype(torch::kInt32); torch::Tensor faces_per_bin = torch::zeros({N, num_bins, num_bins}, opts); torch::Tensor bin_faces = torch::full({N, num_bins, num_bins, M}, -1, opts); const int chunk_size = 512; const size_t shared_size = num_bins * num_bins * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; hipLaunchKernelGGL(( RasterizeMeshesCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, 0, face_verts.contiguous().data<float>(), mesh_to_face_first_idx.contiguous().data<int64_t>(), num_faces_per_mesh.contiguous().data<int64_t>(), blur_radius, N, F, H, W, bin_size, chunk_size, M, faces_per_bin.contiguous().data<int32_t>(), bin_faces.contiguous().data<int32_t>()); return bin_faces; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesFineCudaKernel( const float* face_verts, // (F, 3, 3) const int32_t* bin_faces, // (N, B, B, T) const float blur_radius, const 
int bin_size, const bool perspective_correct, const int N, const int B, const int M, const int H, const int W, const int K, int64_t* face_idxs, // (N, S, S, K) float* zbuf, // (N, S, S, K) float* pix_dists, // (N, S, S, K) float* bary // (N, S, S, K, 3) ) { // This can be more than S^2 if S % bin_size != 0 int num_pixels = N * B * B * bin_size * bin_size; int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from faces and bin_faces. int i = pid; const int n = i / (B * B * bin_size * bin_size); i %= B * B * bin_size * bin_size; const int by = i / (B * bin_size * bin_size); i %= B * bin_size * bin_size; const int bx = i / (bin_size * bin_size); i %= bin_size * bin_size; const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const float xf = PixToNdc(xidx, W); const float yf = PixToNdc(yidx, H); const float2 pxy = make_float2(xf, yf); // This part looks like the naive rasterization kernel, except we use // bin_faces to only look at a subset of faces already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; m++) { const int f = bin_faces[n * B * B * M + by * B * M + bx * M + m]; if (f < 0) { continue; // bin_faces uses -1 as a sentinal value. 
} // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct); } // Now we've looked at all the faces for this bin, so we can write // output for the current pixel. // TODO: make sorting an option as only top k is needed, not sorted values. BubbleSort(q, q_size); const int pix_idx = n * H * W * K + yi * H * K + xi * K; for (int k = 0; k < q_size; k++) { face_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist; bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> RasterizeMeshesFineCuda( const torch::Tensor& face_verts, const torch::Tensor& bin_faces, const int image_size, const float blur_radius, const int bin_size, const int faces_per_pixel, const bool perspective_correct) { if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || face_verts.size(2) != 3) { AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); } if (bin_faces.ndimension() != 4) { AT_ERROR("bin_faces must have 4 dimensions"); } const int N = bin_faces.size(0); const int B = bin_faces.size(1); const int M = bin_faces.size(3); const int K = faces_per_pixel; const int H = image_size; // Assume square images only. 
const int W = image_size; if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 8"); } auto long_opts = face_verts.options().dtype(torch::kInt64); auto float_opts = face_verts.options().dtype(torch::kFloat32); torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts); torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts); torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts); torch::Tensor bary = torch::full({N, H, W, K, 3}, -1, float_opts); const size_t blocks = 1024; const size_t threads = 64; hipLaunchKernelGGL(( RasterizeMeshesFineCudaKernel), dim3(blocks), dim3(threads), 0, 0, face_verts.contiguous().data<float>(), bin_faces.contiguous().data<int32_t>(), blur_radius, bin_size, perspective_correct, N, B, M, H, W, K, face_idxs.contiguous().data<int64_t>(), zbuf.contiguous().data<float>(), pix_dists.contiguous().data<float>(), bary.contiguous().data<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); }
76743c1d7c8f0c93bcbde36c205bb21ccb43d4e9.cu
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <float.h> #include <math.h> #include <thrust/tuple.h> #include <torch/extension.h> #include <cstdio> #include <tuple> #include "float_math.cuh" #include "geometry_utils.cuh" #include "rasterize_points/bitmask.cuh" #include "rasterize_points/rasterization_utils.cuh" namespace { // A structure for holding details about a pixel. struct Pixel { float z; int64_t idx; float dist; float3 bary; }; __device__ bool operator<(const Pixel& a, const Pixel& b) { return a.z < b.z; } __device__ float FloatMin3(const float p1, const float p2, const float p3) { return fminf(p1, fminf(p2, p3)); } __device__ float FloatMax3(const float p1, const float p2, const float p3) { return fmaxf(p1, fmaxf(p2, p3)); } // Get the xyz coordinates of the three vertices for the face given by the // index face_idx into face_verts. __device__ thrust::tuple<float3, float3, float3> GetSingleFaceVerts( const float* face_verts, int face_idx) { const float x0 = face_verts[face_idx * 9 + 0]; const float y0 = face_verts[face_idx * 9 + 1]; const float z0 = face_verts[face_idx * 9 + 2]; const float x1 = face_verts[face_idx * 9 + 3]; const float y1 = face_verts[face_idx * 9 + 4]; const float z1 = face_verts[face_idx * 9 + 5]; const float x2 = face_verts[face_idx * 9 + 6]; const float y2 = face_verts[face_idx * 9 + 7]; const float z2 = face_verts[face_idx * 9 + 8]; const float3 v0xyz = make_float3(x0, y0, z0); const float3 v1xyz = make_float3(x1, y1, z1); const float3 v2xyz = make_float3(x2, y2, z2); return thrust::make_tuple(v0xyz, v1xyz, v2xyz); } // Get the min/max x/y/z values for the face given by vertices v0, v1, v2. 
__device__ thrust::tuple<float2, float2, float2> GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { const float xmin = FloatMin3(v0.x, v1.x, v2.x); const float ymin = FloatMin3(v0.y, v1.y, v2.y); const float zmin = FloatMin3(v0.z, v1.z, v2.z); const float xmax = FloatMax3(v0.x, v1.x, v2.x); const float ymax = FloatMax3(v0.y, v1.y, v2.y); const float zmax = FloatMax3(v0.z, v1.z, v2.z); return thrust::make_tuple( make_float2(xmin, xmax), make_float2(ymin, ymax), make_float2(zmin, zmax)); } // Check if the point (px, py) lies outside the face bounding box face_bbox. // Return true if the point is outside. __device__ bool CheckPointOutsideBoundingBox( float3 v0, float3 v1, float3 v2, float blur_radius, float2 pxy) { const auto bbox = GetFaceBoundingBox(v0, v1, v2); const float2 xlims = thrust::get<0>(bbox); const float2 ylims = thrust::get<1>(bbox); const float2 zlims = thrust::get<2>(bbox); const float x_min = xlims.x - blur_radius; const float y_min = ylims.x - blur_radius; const float x_max = xlims.y + blur_radius; const float y_max = ylims.y + blur_radius; // Check if the current point is oustside the triangle bounding box. return (pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min); } // This function checks if a pixel given by xy location pxy lies within the // face with index face_idx in face_verts. One of the inputs is a list (q) // which contains Pixel structs with the indices of the faces which intersect // with this pixel sorted by closest z distance. If the point pxy lies in the // face, the list (q) is updated and re-orderered in place. In addition // the auxillary variables q_size, q_max_z and q_max_idx are also modified. // This code is shared between RasterizeMeshesNaiveCudaKernel and // RasterizeMeshesFineCudaKernel. 
template <typename FaceQ> __device__ void CheckPixelInsideFace( const float* face_verts, // (F, 3, 3) const int face_idx, int& q_size, float& q_max_z, int& q_max_idx, FaceQ& q, const float blur_radius, const float2 pxy, // Coordinates of the pixel const int K, const bool perspective_correct) { const auto v012 = GetSingleFaceVerts(face_verts, face_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only need xy for barycentric coordinates and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Perform checks and skip if: // 1. the face is behind the camera // 2. the face has very small face area // 3. the pixel is outside the face bbox const float zmax = FloatMax3(v0.z, v1.z, v2.z); const bool outside_bbox = CheckPointOutsideBoundingBox( v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); const bool zero_face_area = (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon); if (zmax < 0 || outside_bbox || zero_face_area) { return; } // Calculate barycentric coords and euclidean dist to triangle. const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 p_bary = !perspective_correct ? p_bary0 : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); const float pz = p_bary.x * v0.z + p_bary.y * v1.z + p_bary.z * v2.z; if (pz < 0) { return; // Face is behind the image plane. } // Get abs squared distance const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); // Use the bary coordinates to determine if the point is inside the face. const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; const float signed_dist = inside ? 
-dist : dist; // Check if pixel is outside blur region if (!inside && dist >= blur_radius) { return; } if (q_size < K) { // Just insert it. q[q_size] = {pz, face_idx, signed_dist, p_bary}; if (pz > q_max_z) { q_max_z = pz; q_max_idx = q_size; } q_size++; } else if (pz < q_max_z) { // Overwrite the old max, and find the new max. q[q_max_idx] = {pz, face_idx, signed_dist, p_bary}; q_max_z = pz; for (int i = 0; i < K; i++) { if (q[i].z > q_max_z) { q_max_z = q[i].z; q_max_idx = i; } } } } } // namespace // **************************************************************************** // * NAIVE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesNaiveCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const bool perspective_correct, const int N, const int H, const int W, const int K, int64_t* face_idxs, float* zbuf, float* pix_dists, float* bary) { // Simple version: One thread per output pixel int num_threads = gridDim.x * blockDim.x; int tid = blockDim.x * blockIdx.x + threadIdx.x; for (int i = tid; i < N * H * W; i += num_threads) { // Convert linear index to 3D index const int n = i / (H * W); // batch index. const int pix_idx = i % (H * W); // Reverse ordering of X and Y axes const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; // screen coordinates to ndc coordiantes of pixel. const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // For keeping track of the K closest points we want a data structure // that (1) gives O(1) access to the closest point for easy comparisons, // and (2) allows insertion of new elements. In the CPU version we use // std::priority_queue; then (2) is O(log K). 
We can't use STL // containers in CUDA; we could roll our own max heap in an array, but // that would likely have a lot of warp divergence so we do something // simpler instead: keep the elements in an unsorted array, but keep // track of the max value and the index of the max value. Then (1) is // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 // this should be fast enough for our purposes. Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; // Using the batch index of the thread get the start and stop // indices for the faces. const int64_t face_start_idx = mesh_to_face_first_idx[n]; const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; // Loop through the faces in the mesh. for (int f = face_start_idx; f < face_stop_idx; ++f) { // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct); } // TODO: make sorting an option as only top k is needed, not sorted values. 
BubbleSort(q, q_size); int idx = n * H * W * K + pix_idx * K; for (int k = 0; k < q_size; ++k) { face_idxs[idx + k] = q[k].idx; zbuf[idx + k] = q[k].z; pix_dists[idx + k] = q[k].dist; bary[(idx + k) * 3 + 0] = q[k].bary.x; bary[(idx + k) * 3 + 1] = q[k].bary.y; bary[(idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> RasterizeMeshesNaiveCuda( const torch::Tensor& face_verts, const torch::Tensor& mesh_to_faces_packed_first_idx, const torch::Tensor& num_faces_per_mesh, const int image_size, const float blur_radius, const int num_closest, const bool perspective_correct) { if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || face_verts.size(2) != 3) { AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); } if (num_faces_per_mesh.size(0) != mesh_to_faces_packed_first_idx.size(0)) { AT_ERROR( "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); } if (num_closest > kMaxPointsPerPixel) { std::stringstream ss; ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; AT_ERROR(ss.str()); } const int N = num_faces_per_mesh.size(0); // batch size. const int H = image_size; // Assume square images. 
const int W = image_size; const int K = num_closest; auto long_opts = face_verts.options().dtype(torch::kInt64); auto float_opts = face_verts.options().dtype(torch::kFloat32); torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts); torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts); torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts); torch::Tensor bary = torch::full({N, H, W, K, 3}, -1, float_opts); const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesNaiveCudaKernel<<<blocks, threads>>>( face_verts.contiguous().data<float>(), mesh_to_faces_packed_first_idx.contiguous().data<int64_t>(), num_faces_per_mesh.contiguous().data<int64_t>(), blur_radius, perspective_correct, N, H, W, K, face_idxs.contiguous().data<int64_t>(), zbuf.contiguous().data<float>(), pix_dists.contiguous().data<float>(), bary.contiguous().data<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); } // **************************************************************************** // * BACKWARD PASS * // **************************************************************************** // TODO: benchmark parallelizing over faces_verts instead of over pixels. __global__ void RasterizeMeshesBackwardCudaKernel( const float* face_verts, // (F, 3, 3) const int64_t* pix_to_face, // (N, H, W, K) const bool perspective_correct, const int N, const int H, const int W, const int K, const float* grad_zbuf, // (N, H, W, K) const float* grad_bary, // (N, H, W, K, 3) const float* grad_dists, // (N, H, W, K) float* grad_face_verts) { // (F, 3, 3) // Parallelize over each pixel in images of // size H * W, for each image in the batch of size N. const int num_threads = gridDim.x * blockDim.x; const int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { // Convert linear index to 3D index const int n = t_i / (H * W); // batch index. const int pix_idx = t_i % (H * W); // Reverse ordering of X and Y axes. 
const int yi = H - 1 - pix_idx / W; const int xi = W - 1 - pix_idx % W; const float xf = PixToNdc(xi, W); const float yf = PixToNdc(yi, H); const float2 pxy = make_float2(xf, yf); // Loop over all the faces for this pixel. for (int k = 0; k < K; k++) { // Index into (N, H, W, K, :) grad tensors // pixel index + top k index int i = n * H * W * K + pix_idx * K + k; const int f = pix_to_face[i]; if (f < 0) { continue; // padded face. } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Only neex xy for barycentric coordinate and distance calculations. const float2 v0xy = make_float2(v0.x, v0.y); const float2 v1xy = make_float2(v1.x, v1.y); const float2 v2xy = make_float2(v2.x, v2.y); // Get upstream gradients for the face. const float grad_dist_upstream = grad_dists[i]; const float grad_zbuf_upstream = grad_zbuf[i]; const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; const float3 grad_bary_upstream = make_float3( grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); const float3 bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); const float3 bary = !perspective_correct ? bary0 : BarycentricPerspectiveCorrectionForward(bary0, v0.z, v1.z, v2.z); const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f; const float sign = inside ? -1.0f : 1.0f; // TODO(T52813608) Add support for non-square images. 
auto grad_dist_f = PointTriangleDistanceBackward( pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); // Upstream gradient for barycentric coords from zbuf calculation: // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 // Therefore // d_zbuf/d_bary_w0 = z0 // d_zbuf/d_bary_w1 = z1 // d_zbuf/d_bary_w2 = z2 const float3 d_zbuf_d_bary = make_float3(v0.z, v1.z, v2.z); // Total upstream barycentric gradients are the sum of // external upstream gradients and contribution from zbuf. const float3 grad_bary_f_sum = (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bary); float3 grad_bary0 = grad_bary_f_sum; float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; if (perspective_correct) { auto perspective_grads = BarycentricPerspectiveCorrectionBackward( bary0, v0.z, v1.z, v2.z, grad_bary_f_sum); grad_bary0 = thrust::get<0>(perspective_grads); dz0_persp = thrust::get<1>(perspective_grads); dz1_persp = thrust::get<2>(perspective_grads); dz2_persp = thrust::get<3>(perspective_grads); } auto grad_bary_f = BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); atomicAdd( grad_face_verts + f * 9 + 2, grad_zbuf_upstream * bary.x + dz0_persp); atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); atomicAdd( grad_face_verts + f * 9 + 5, grad_zbuf_upstream * bary.y + dz1_persp); atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); atomicAdd( 
grad_face_verts + f * 9 + 8, grad_zbuf_upstream * bary.z + dz2_persp); } } } torch::Tensor RasterizeMeshesBackwardCuda( const torch::Tensor& face_verts, // (F, 3, 3) const torch::Tensor& pix_to_face, // (N, H, W, K) const torch::Tensor& grad_zbuf, // (N, H, W, K) const torch::Tensor& grad_bary, // (N, H, W, K, 3) const torch::Tensor& grad_dists, // (N, H, W, K) const bool perspective_correct) { const int F = face_verts.size(0); const int N = pix_to_face.size(0); const int H = pix_to_face.size(1); const int W = pix_to_face.size(2); const int K = pix_to_face.size(3); torch::Tensor grad_face_verts = torch::zeros({F, 3, 3}, face_verts.options()); const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesBackwardCudaKernel<<<blocks, threads>>>( face_verts.contiguous().data<float>(), pix_to_face.contiguous().data<int64_t>(), perspective_correct, N, H, W, K, grad_zbuf.contiguous().data<float>(), grad_bary.contiguous().data<float>(), grad_dists.contiguous().data<float>(), grad_face_verts.contiguous().data<float>()); return grad_face_verts; } // **************************************************************************** // * COARSE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesCoarseCudaKernel( const float* face_verts, const int64_t* mesh_to_face_first_idx, const int64_t* num_faces_per_mesh, const float blur_radius, const int N, const int F, const int H, const int W, const int bin_size, const int chunk_size, const int max_faces_per_bin, int* faces_per_bin, int* bin_faces) { extern __shared__ char sbuf[]; const int M = max_faces_per_bin; const int num_bins = 1 + (W - 1) / bin_size; // Integer divide round up const float half_pix = 1.0f / W; // Size of half a pixel in NDC units // This is a boolean array of shape (num_bins, num_bins, chunk_size) // stored in shared memory that will track whether each point in the chunk // falls into each bin of the image. 
BitMask binmask((unsigned int*)sbuf, num_bins, num_bins, chunk_size); // Have each block handle a chunk of faces const int chunks_per_batch = 1 + (F - 1) / chunk_size; const int num_chunks = N * chunks_per_batch; for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { const int batch_idx = chunk / chunks_per_batch; // batch index const int chunk_idx = chunk % chunks_per_batch; const int face_start_idx = chunk_idx * chunk_size; binmask.block_clear(); const int64_t mesh_face_start_idx = mesh_to_face_first_idx[batch_idx]; const int64_t mesh_face_stop_idx = mesh_face_start_idx + num_faces_per_mesh[batch_idx]; // Have each thread handle a different face within the chunk for (int f = threadIdx.x; f < chunk_size; f += blockDim.x) { const int f_idx = face_start_idx + f; // Check if face index corresponds to the mesh in the batch given by // batch_idx if (f_idx >= mesh_face_stop_idx || f_idx < mesh_face_start_idx) { continue; } // Get xyz coordinates of the three face vertices. const auto v012 = GetSingleFaceVerts(face_verts, f_idx); const float3 v0 = thrust::get<0>(v012); const float3 v1 = thrust::get<1>(v012); const float3 v2 = thrust::get<2>(v012); // Compute screen-space bbox for the triangle expanded by blur. float xmin = FloatMin3(v0.x, v1.x, v2.x) - sqrt(blur_radius); float ymin = FloatMin3(v0.y, v1.y, v2.y) - sqrt(blur_radius); float xmax = FloatMax3(v0.x, v1.x, v2.x) + sqrt(blur_radius); float ymax = FloatMax3(v0.y, v1.y, v2.y) + sqrt(blur_radius); float zmax = FloatMax3(v0.z, v1.z, v2.z); if (zmax < 0) { continue; // Face is behind the camera. } // Brute-force search over all bins; TODO(T54294966) something smarter. for (int by = 0; by < num_bins; ++by) { // Y coordinate of the top and bottom of the bin. // PixToNdc gives the location of the center of each pixel, so we // need to add/subtract a half pixel to get the true extent of the bin. // Reverse ordering of Y axis so that +Y is upwards in the image. 
const int yidx = num_bins - by; const float bin_y_max = PixToNdc(yidx * bin_size - 1, H) + half_pix; const float bin_y_min = PixToNdc((yidx - 1) * bin_size, H) - half_pix; const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax); for (int bx = 0; bx < num_bins; ++bx) { // X coordinate of the left and right of the bin. // Reverse ordering of x axis so that +X is left. const int xidx = num_bins - bx; const float bin_x_max = PixToNdc(xidx * bin_size - 1, W) + half_pix; const float bin_x_min = PixToNdc((xidx - 1) * bin_size, W) - half_pix; const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax); if (y_overlap && x_overlap) { binmask.set(by, bx, f); } } } } __syncthreads(); // Now we have processed every face in the current chunk. We need to // count the number of faces in each bin so we can write the indices // out to global memory. We have each thread handle a different bin. for (int byx = threadIdx.x; byx < num_bins * num_bins; byx += blockDim.x) { const int by = byx / num_bins; const int bx = byx % num_bins; const int count = binmask.count(by, bx); const int faces_per_bin_idx = batch_idx * num_bins * num_bins + by * num_bins + bx; // This atomically increments the (global) number of faces found // in the current bin, and gets the previous value of the counter; // this effectively allocates space in the bin_faces array for the // faces in the current chunk that fall into this bin. const int start = atomicAdd(faces_per_bin + faces_per_bin_idx, count); // Now loop over the binmask and write the active bits for this bin // out to bin_faces. int next_idx = batch_idx * num_bins * num_bins * M + by * num_bins * M + bx * M + start; for (int f = 0; f < chunk_size; ++f) { if (binmask.get(by, bx, f)) { // TODO(T54296346) find the correct method for handling errors in // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin. 
// Either decrease bin size or increase max_faces_per_bin bin_faces[next_idx] = face_start_idx + f; next_idx++; } } } __syncthreads(); } } torch::Tensor RasterizeMeshesCoarseCuda( const torch::Tensor& face_verts, const torch::Tensor& mesh_to_face_first_idx, const torch::Tensor& num_faces_per_mesh, const int image_size, const float blur_radius, const int bin_size, const int max_faces_per_bin) { if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || face_verts.size(2) != 3) { AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); } const int W = image_size; const int H = image_size; const int F = face_verts.size(0); const int N = num_faces_per_mesh.size(0); const int num_bins = 1 + (image_size - 1) / bin_size; // Divide round up. const int M = max_faces_per_bin; if (num_bins >= 22) { std::stringstream ss; ss << "Got " << num_bins << "; that's too many!"; AT_ERROR(ss.str()); } auto opts = face_verts.options().dtype(torch::kInt32); torch::Tensor faces_per_bin = torch::zeros({N, num_bins, num_bins}, opts); torch::Tensor bin_faces = torch::full({N, num_bins, num_bins, M}, -1, opts); const int chunk_size = 512; const size_t shared_size = num_bins * num_bins * chunk_size / 8; const size_t blocks = 64; const size_t threads = 512; RasterizeMeshesCoarseCudaKernel<<<blocks, threads, shared_size>>>( face_verts.contiguous().data<float>(), mesh_to_face_first_idx.contiguous().data<int64_t>(), num_faces_per_mesh.contiguous().data<int64_t>(), blur_radius, N, F, H, W, bin_size, chunk_size, M, faces_per_bin.contiguous().data<int32_t>(), bin_faces.contiguous().data<int32_t>()); return bin_faces; } // **************************************************************************** // * FINE RASTERIZATION * // **************************************************************************** __global__ void RasterizeMeshesFineCudaKernel( const float* face_verts, // (F, 3, 3) const int32_t* bin_faces, // (N, B, B, T) const float blur_radius, const int bin_size, const bool 
perspective_correct, const int N, const int B, const int M, const int H, const int W, const int K, int64_t* face_idxs, // (N, S, S, K) float* zbuf, // (N, S, S, K) float* pix_dists, // (N, S, S, K) float* bary // (N, S, S, K, 3) ) { // This can be more than S^2 if S % bin_size != 0 int num_pixels = N * B * B * bin_size * bin_size; int num_threads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int pid = tid; pid < num_pixels; pid += num_threads) { // Convert linear index into bin and pixel indices. We make the within // block pixel ids move the fastest, so that adjacent threads will fall // into the same bin; this should give them coalesced memory reads when // they read from faces and bin_faces. int i = pid; const int n = i / (B * B * bin_size * bin_size); i %= B * B * bin_size * bin_size; const int by = i / (B * bin_size * bin_size); i %= B * bin_size * bin_size; const int bx = i / (bin_size * bin_size); i %= bin_size * bin_size; const int yi = i / bin_size + by * bin_size; const int xi = i % bin_size + bx * bin_size; if (yi >= H || xi >= W) continue; // Reverse ordering of the X and Y axis so that // in the image +Y is pointing up and +X is pointing left. const int yidx = H - 1 - yi; const int xidx = W - 1 - xi; const float xf = PixToNdc(xidx, W); const float yf = PixToNdc(yidx, H); const float2 pxy = make_float2(xf, yf); // This part looks like the naive rasterization kernel, except we use // bin_faces to only look at a subset of faces already known to fall // in this bin. TODO abstract out this logic into some data structure // that is shared by both kernels? Pixel q[kMaxPointsPerPixel]; int q_size = 0; float q_max_z = -1000; int q_max_idx = -1; for (int m = 0; m < M; m++) { const int f = bin_faces[n * B * B * M + by * B * M + bx * M + m]; if (f < 0) { continue; // bin_faces uses -1 as a sentinal value. } // Check if the pixel pxy is inside the face bounding box and if it is, // update q, q_size, q_max_z and q_max_idx in place. 
CheckPixelInsideFace( face_verts, f, q_size, q_max_z, q_max_idx, q, blur_radius, pxy, K, perspective_correct); } // Now we've looked at all the faces for this bin, so we can write // output for the current pixel. // TODO: make sorting an option as only top k is needed, not sorted values. BubbleSort(q, q_size); const int pix_idx = n * H * W * K + yi * H * K + xi * K; for (int k = 0; k < q_size; k++) { face_idxs[pix_idx + k] = q[k].idx; zbuf[pix_idx + k] = q[k].z; pix_dists[pix_idx + k] = q[k].dist; bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; } } } std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> RasterizeMeshesFineCuda( const torch::Tensor& face_verts, const torch::Tensor& bin_faces, const int image_size, const float blur_radius, const int bin_size, const int faces_per_pixel, const bool perspective_correct) { if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || face_verts.size(2) != 3) { AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); } if (bin_faces.ndimension() != 4) { AT_ERROR("bin_faces must have 4 dimensions"); } const int N = bin_faces.size(0); const int B = bin_faces.size(1); const int M = bin_faces.size(3); const int K = faces_per_pixel; const int H = image_size; // Assume square images only. 
const int W = image_size; if (K > kMaxPointsPerPixel) { AT_ERROR("Must have num_closest <= 8"); } auto long_opts = face_verts.options().dtype(torch::kInt64); auto float_opts = face_verts.options().dtype(torch::kFloat32); torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts); torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts); torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts); torch::Tensor bary = torch::full({N, H, W, K, 3}, -1, float_opts); const size_t blocks = 1024; const size_t threads = 64; RasterizeMeshesFineCudaKernel<<<blocks, threads>>>( face_verts.contiguous().data<float>(), bin_faces.contiguous().data<int32_t>(), blur_radius, bin_size, perspective_correct, N, B, M, H, W, K, face_idxs.contiguous().data<int64_t>(), zbuf.contiguous().data<float>(), pix_dists.contiguous().data<float>(), bary.contiguous().data<float>()); return std::make_tuple(face_idxs, zbuf, bary, pix_dists); }
eb8f8f8375cb664df56646af7bbfc99067492e35.hip
// !!! This is a file automatically generated by hipify!!! #include "cuTrasformation.cuh" #include <math.h> #include <stdio.h> #include "../common/cuMatrix.h" #include "../common/util.h" #include "hip/hip_runtime.h" #include <hiprand/hiprand_kernel.h> #include <time.h> #include "../common/Config.h" #include <helper_functions.h> #include <helper_cuda.h> #include "../common/cuBase.h" #define GAUSSIAN_FIELD_SIZE (21) /* strictly odd number */ #define constDistortion (1.0) hiprandGenerator_t rand_generator_device; const hiprandRngType_t generator_type = HIPRAND_RNG_PSEUDO_DEFAULT; cuMatrix<float>* cuGaussianKernel; cuMatrix<float>* cuDispH; cuMatrix<float>* cuDispV; float * cu_d_randonNumf; float* cu_d_randomNum; float* cu_h_randomNum; float dElasticSigma = 4.0; /* higher numbers are more smooth and less distorted; Simard uses 4.0*/ int getRandomNumLen(int batch, int ImgSize) { return batch * ImgSize * ImgSize * 2 * Config::instance()->getChannels(); } /* * blocks : dim3(1) * threads: dim3(GAUSSIAN_FIELD_SIZE*GAUSSIAN_FIELD_SIZE) */ __global__ void g_createGaussianKernel(float* gaussian, float dElasticSigma) { int iiMid = GAUSSIAN_FIELD_SIZE >> 1; float floatElasticSigma = dElasticSigma * dElasticSigma; int row = threadIdx.x % GAUSSIAN_FIELD_SIZE; int col = threadIdx.x / GAUSSIAN_FIELD_SIZE; float val1 = 1.0 / (dElasticSigma * 2.0 * 3.1415926535897932384626433832795); float val2 = (row-iiMid)*(row-iiMid) + (col-iiMid)*(col-iiMid); gaussian[threadIdx.x] = val1 * exp(-1.0 * val2 / (2.0 * floatElasticSigma)); } void cuInitDistortionMemery(int batch, int ImgSize) { hiprandStatus_t curandstatus; cuGaussianKernel = new cuMatrix<float>(GAUSSIAN_FIELD_SIZE, GAUSSIAN_FIELD_SIZE, 1); if(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE > MAX_THREADS) { char logStr[1024]; sprintf(logStr, "g_createGaussianKernel > MAX_THREADS\n"); LOG(logStr, "Result/log.txt"); exit(0); } hipLaunchKernelGGL(( g_createGaussianKernel), dim3(dim3(1)),dim3(dim3(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE)), 0, 
0, cuGaussianKernel->getDev(), dElasticSigma); hipStreamSynchronize(0); /*cu_d_randomNum*/ checkCudaErrors( MemoryMonitor::instance()->gpuMalloc((void**)&cu_d_randomNum, sizeof(float) * getRandomNumLen(batch, ImgSize)) ); /*cu_d_randonNumf*/ checkCudaErrors( MemoryMonitor::instance()->gpuMalloc((void**)&cu_d_randonNumf, sizeof(float) * getRandomNumLen(batch, ImgSize)) ); /*cu_h_randomNum*/ cu_h_randomNum = (float*)MemoryMonitor::instance()->cpuMalloc(sizeof(float) * getRandomNumLen(batch, ImgSize)); if(!cu_h_randomNum) { char logStr[1024]; sprintf(logStr, "malloc cu_h_randomNum fail\n"); LOG(logStr, "Result/log.txt"); exit(0); } /*hiprandCreateGenerator*/ curandstatus = hiprandCreateGenerator(&rand_generator_device, generator_type); if(curandstatus != HIPRAND_STATUS_SUCCESS) { char logStr[1024]; sprintf(logStr, "hiprandCreateGenerator fail\n"); LOG(logStr, "Result/log.txt"); exit(0); } cuDispV = new cuMatrix<float>(batch, ImgSize * ImgSize, 1); cuDispH = new cuMatrix<float>(batch, ImgSize * ImgSize, 1); } __global__ void g_getRandomUniform(float* r1, float* r2, int len) { for(int i = 0; i < len; i += gridDim.x * blockDim.x) { int id = i + blockDim.x * blockIdx.x + threadIdx.x; if(id < len) { r2[id] = r1[id] * 2.0f - 1.0f; } } } /* * blocks : dim3(batch) * threads : dim3(512) */ __global__ void g_generateDistortionMap( float* _dispH, float* _dispV, float* rand, float* gaussianKernel, float dElasticScaling, float dMaxScaling, float dMaxRotation, int ImgSize) { int ImgSize2 = ImgSize * ImgSize; float* uniformH = rand + blockIdx.x * ImgSize2; float* uniformV = rand + blockIdx.x * ImgSize2 * 2; float* dispH = _dispH + ImgSize2 * blockIdx.x; float* dispV = _dispV + ImgSize2 * blockIdx.x; if(dElasticScaling >= 0.1){ for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; int iiMid = GAUSSIAN_FIELD_SIZE / 2; float fConvolvedH = 0.0; float fConvolvedV = 0.0; float fSampleH, 
fSampleV; float elasticScale = dElasticScaling; for(int xxx = 0; xxx < GAUSSIAN_FIELD_SIZE; ++xxx) { for(int yyy = 0; yyy < GAUSSIAN_FIELD_SIZE; ++yyy) { int xxxDisp = col - iiMid + xxx; int yyyDisp = row - iiMid + yyy; if(xxxDisp < 0 || xxxDisp >= ImgSize || yyyDisp < 0 || yyyDisp >= ImgSize) { fSampleH = 0.0; fSampleV = 0.0; } else { fSampleH = uniformH[yyyDisp * ImgSize + xxxDisp]; fSampleV = uniformV[yyyDisp * ImgSize + xxxDisp]; } fConvolvedH += fSampleH * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx] * constDistortion; fConvolvedV += fSampleV * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx] * constDistortion; } } dispH[idx] = elasticScale * fConvolvedH; dispV[idx] = elasticScale * fConvolvedV; } } } else{ for(int is = 0; is < ImgSize2; is += blockDim.x){ int idx = is + threadIdx.x; if(idx < ImgSize2){ dispH[idx] = 0.0; dispV[idx] = 0.0; } } } __syncthreads(); float rand1 = rand[blockIdx.x]; float rand2 = rand[blockIdx.x + 1]; if(fabs(dMaxRotation) >= 0.01){ if(rand1 <= 0.0) rand1 = 0.0; if(rand2 <= 0.0) rand2 = 0.0; } for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float dSFHoriz = dMaxScaling / 100.0 * rand1; float dSFVert = dMaxScaling / 100.0 * rand2; int iMid = ImgSize / 2; dispH[idx] += dSFHoriz * (col - iMid); dispV[idx] += dSFVert * (row - iMid); } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float angle = dMaxRotation * rand[blockIdx.x]; //printf("%f\n",angle); angle = angle * 3.1415926535897932384626433832795 / 180.0; float cosAngle = cos(angle); float sinAngle = sin(angle); int iMid = ImgSize / 2; float xx = row - iMid; float yy = col - iMid; dispH[idx] += yy - yy * cosAngle - xx * sinAngle; dispV[idx] += xx - xx * cosAngle + yy * sinAngle; } } } __global__ void g_scaleAndRotate( float* _dispH, float* _dispV, float scalingx, 
float scalingy, float rotation, int ImgSize) { int ImgSize2 = ImgSize * ImgSize; float* dispH = _dispH + ImgSize2 * blockIdx.x; float* dispV = _dispV + ImgSize2 * blockIdx.x; for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { dispH[idx] = 0.0; dispV[idx] = 0.0; } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float dSFHoriz = scalingx / 100.0; float dSFVert = scalingy / 100.0; int iMid = ImgSize / 2; dispH[idx] += dSFHoriz * (col - iMid); dispV[idx] += dSFVert * (row - iMid); } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float angle = rotation; angle = angle * 3.1415926535897932384626433832795 / 180.0; float cosAngle = cos(angle); float sinAngle = sin(angle); int iMid = ImgSize / 2; float xx = row - iMid; float yy = col - iMid; dispH[idx] += yy - yy * cosAngle - xx * sinAngle; dispV[idx] += xx - xx * cosAngle + yy * sinAngle; } } } /* * blocks : dim3(batch, Config::instance()->getChannels()) * threads: dim3(min(512, ImgSize * ImgSize)) */ __global__ void g_applyDistortionMap( float** _inputs, float** _outputs, float* _dispH, float* _dispV, int ImgSize) { extern __shared__ float img[]; int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize; float* input = _inputs[blockIdx.x] + ImgSize2 * c; float* output= _outputs[blockIdx.x]+ ImgSize2 * c; float* dispV = _dispV + blockIdx.x * ImgSize2; float* dispH = _dispH + blockIdx.x * ImgSize2; for(int is = 0; is < ImgSize2; is += blockDim.x){ int idx = is + threadIdx.x; if(idx < ImgSize2){ img[idx] = input[idx]; } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float sourceRow, sourceCol; float fracRow, fracCol; float 
w1, w2, w3, w4; float sourceValue; int sRow, sCol, sRowp1, sColp1; bool bSkipOutOfBounds; if(fabs(dispV[idx]) < 0.000000001 && fabs(dispH[idx]) < 0.0000000001) { output[idx] = input[idx]; continue; } sourceRow = (float)row - dispV[idx]; sourceCol = (float)col - dispH[idx]; fracRow = sourceRow - (int)sourceRow; fracCol = sourceCol - (int)sourceCol; w1 = ( 1.0 - fracRow ) * ( 1.0 - fracCol ); w2 = ( 1.0 - fracRow ) * fracCol; w3 = fracRow * ( 1.0 - fracCol ); w4 = fracRow * fracCol; bSkipOutOfBounds = false; if ( ((int)sourceRow + 1) >= ImgSize ) bSkipOutOfBounds = true; if ( (int)sourceRow < 0 ) bSkipOutOfBounds = true; if ( ((int)sourceCol + 1) >= ImgSize ) bSkipOutOfBounds = true; if ( (int)sourceCol < 0 ) bSkipOutOfBounds = true; if ( bSkipOutOfBounds == false ) { sRow = (int)sourceRow; sCol = (int)sourceCol; sRowp1 = sRow + 1; sColp1 = sCol + 1; while (sRowp1 >= ImgSize) sRowp1 -= ImgSize; while (sRowp1 < 0) sRowp1 += ImgSize; while (sColp1 >= ImgSize) sColp1 -= ImgSize; while (sColp1 < 0) sColp1 += ImgSize; while (sRow >= ImgSize) sRow -= ImgSize; while (sRow < 0) sRow += ImgSize; while (sCol >= ImgSize) sCol -= ImgSize; while (sCol < 0) sCol += ImgSize; sourceValue = w1 * img[sRow * ImgSize + sCol] + w2 * img[sRow * ImgSize + sColp1] + w3 * img[sRowp1 * ImgSize + sCol] + w4 * img[sRowp1 * ImgSize + sColp1]; } else { sourceValue = -1.0; } output[idx] = sourceValue; } } } void cuApplyRandom(int batch, unsigned long long s, int ImgSize) { hiprandStatus_t hiprandStatus_t; unsigned long long seed = s; hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(rand_generator_device, seed); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS) { char logStr[1024]; sprintf(logStr, "hiprandSetPseudoRandomGeneratorSeed fail\n"); LOG(logStr, "Result/log.txt"); exit(0); } hiprandGenerateUniform(rand_generator_device, cu_d_randonNumf, getRandomNumLen(batch, ImgSize)); hipLaunchKernelGGL(( g_getRandomUniform), dim3(dim3(256)),dim3(dim3(256)), 0, 0, cu_d_randonNumf, cu_d_randomNum, 
getRandomNumLen(batch, ImgSize)); hipStreamSynchronize(0); getLastCudaError("g_getRandomUniform"); int threads = min(512, ImgSize * ImgSize); hipLaunchKernelGGL(( g_generateDistortionMap), dim3(dim3(batch)),dim3(threads), 0, 0, cuDispH->getDev(), cuDispV->getDev(), cu_d_randomNum, cuGaussianKernel->getDev(), Config::instance()->getDistortion(), Config::instance()->getScale(), Config::instance()->getRotation(), ImgSize); hipStreamSynchronize(0); getLastCudaError("g_generateDistortionMap"); } void cuApplyScaleAndRotate(int batch, int ImgSize, float scalingx, float scalingy, float rotation) { hipLaunchKernelGGL(( g_scaleAndRotate), dim3(dim3(batch)),dim3(dim3(512)), 0, 0, cuDispH->getDev(), cuDispV->getDev(), scalingx, scalingy, rotation, ImgSize); hipStreamSynchronize(0); getLastCudaError("g_generateDistortionMap"); } void cuApplyDistortion(float**inputs, float**outputs, int batch, int ImgSize) { int threadidx = min(ImgSize * ImgSize, 512); hipLaunchKernelGGL(( g_applyDistortionMap), dim3(dim3(batch, Config::instance()->getChannels())), dim3( dim3(threadidx)), sizeof(float) * ImgSize * ImgSize, 0, inputs, outputs, cuDispH->getDev(), cuDispV->getDev(), ImgSize); hipStreamSynchronize(0); getLastCudaError("g_applyDistortionMap"); } /* * blocks : dim3(batch, channels) * threads : dim3(min(ImgSize*ImgSize, 512)) */ __global__ void g_applyCropRandom(float**_inputs, float**_outputs, float* random, int crop, int ImgSize) { int c = blockIdx.y; int outputImgSize = ImgSize; int inputImgSize = ImgSize + crop; int inputImgSize2 = inputImgSize * inputImgSize; int outputImgSize2= outputImgSize* outputImgSize; float* input = _inputs [blockIdx.x] + c * inputImgSize2; float* output= _outputs[blockIdx.x] + c * outputImgSize2; int sx =(int)((random[blockIdx.x] + 1.0) * 0.5 * crop); int sy =(int)((random[blockIdx.x + 1] + 1.0) * 0.5 * crop); if(sx > crop) sx = crop; if(sy > crop) sy = crop; if(sx < 0) sx = 0; if(sy < 0) sy = 0; // if(threadIdx.x == 0) // sprintf(logStr, "%d %d\n", sx, 
sy); for(int is = 0; is < outputImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < outputImgSize2) { int ox = idx / outputImgSize; int oy = idx % outputImgSize; int ix = ox + sx; int iy = oy + sy; cuAssert(ix < inputImgSize && iy < inputImgSize); output[idx] = input[ix * inputImgSize + iy]; } } } /* * blocks : dim3(batch, channels) * threads: dim3(min(ImgSize * ImgSize, 512); */ __global__ void g_applyCrop(float**_inputs, float**_outputs, float* random, int croplen, int ImgSize, int cropr, int cropc) { int c = blockIdx.y; int outputImgSize = ImgSize; int inputImgSize = ImgSize + croplen; int inputImgSize2 = inputImgSize * inputImgSize; int outputImgSize2= outputImgSize* outputImgSize; float* input = _inputs [blockIdx.x]+ c * inputImgSize2 ; float* output= _outputs[blockIdx.x]+ c * outputImgSize2; int sx = cropr; int sy = cropc; for(int is = 0; is < outputImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < outputImgSize2) { int ox = idx / outputImgSize; int oy = idx % outputImgSize; int ix = ox + sx; int iy = oy + sy; cuAssert(ix < inputImgSize && iy < inputImgSize); output[idx] = input[ix * inputImgSize + iy]; } } } void cuApplyCropRandom(float**inputs, float**outputs, int batch, int ImgSize) { dim3 block = dim3(batch, Config::instance()->getChannels()); dim3 threads = min(512, ImgSize * ImgSize); hipLaunchKernelGGL(( g_applyCropRandom), dim3(block),dim3(threads), 0, 0, inputs, outputs, cu_d_randomNum, Config::instance()->getCrop(), ImgSize); hipStreamSynchronize(0); getLastCudaError("g_applyCropRandom"); } void cuApplyCrop(float**inputs, float**outputs, int batch, int ImgSize, int cropr, int cropc) { int threads = min(512, ImgSize * ImgSize); hipLaunchKernelGGL(( g_applyCrop), dim3(dim3(batch, Config::instance()->getChannels())), dim3( dim3(threads)), 0, 0, inputs, outputs,cu_d_randomNum, Config::instance()->getCrop(), ImgSize, cropr, cropc); hipStreamSynchronize(0); getLastCudaError("g_applyCrop"); } /* * function: orizontal 
Reflection * blocks : dim3(batch, Config::instance()->getChannels()), * threads : dim3(threads) * flag : 0. Random * 1. Horizontal * 2. Not Horizontal */ __global__ void g_applyHorizontal(float**_inputs, float**_outputs, float* rand, int ImgSize, int flag) { int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize ; float* input = _inputs[blockIdx.x] + c * ImgSize2; float* output= _outputs[blockIdx.x]+ c * ImgSize2; int half = ImgSize / 2; for(int is = 0; is < half * ImgSize; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < half * ImgSize) { int ox = idx / half; int oy = idx % half; int ix = ox; int iy = ImgSize - oy - 1; if(flag == RANDOM_HORIZONTAL) { //if(rand[blockIdx.x] <= 0.0){ if(blockIdx.x % 2 == 0){ cuAssert(ix < ImgSize && iy < ImgSize); swap(output[ox * ImgSize + oy], input[ix * ImgSize + iy]); } } else if(flag == HORIZONTAL){ cuAssert(ix < ImgSize && iy < ImgSize); swap(output[ox * ImgSize + oy], input[ix * ImgSize + iy]); } else if(flag == NOT_HORIZONTAL){ } } } } /* * flag : 0. Random * 1. Horizontal * 2. 
Not Horizontal */ void cuApplyHorizontal(float **inputs, float**outputs, int batch, int ImgSize, int flag) { int threads = ::min(ImgSize * ImgSize / 2, 512); hipLaunchKernelGGL(( g_applyHorizontal), dim3(dim3(batch, Config::instance()->getChannels())), dim3( dim3(threads)), 0, 0, inputs, outputs, cu_d_randomNum, ImgSize, flag); hipStreamSynchronize(0); getLastCudaError("g_applyHorizontal"); } __global__ void g_applyWhiteNoise( float** _inputs, float ** _outputs, float * _random, int ImgSize, float stdev){ int s = blockIdx.x; int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize; int offset = ImgSize2 * c; float* input = _inputs [s] + offset; float* output= _outputs[s] + offset; float* rand = _random + offset; if(true){ for(int i = 0; i < ImgSize2; i += blockDim.x){ int idx = i + threadIdx.x; if(idx < ImgSize2){ float val = input[idx] + stdev * rand[idx]; output[idx] = val; } } }else{ for(int i = 0; i < ImgSize2; i += blockDim.x){ int idx = i + threadIdx.x; if(idx < ImgSize2){ output[idx] = input[idx]; } } } } /* ref: http://en.wikipedia.org/wiki/White_noise */ void cuApplyWhiteNoise(float **inputs, float**outputs, int batch, int ImgSize, float stdev) { dim3 blocks = dim3(batch, Config::instance()->getChannels()); dim3 threads = dim3(min(ImgSize * ImgSize, 512)); hipLaunchKernelGGL(( g_applyWhiteNoise), dim3(blocks), dim3(threads), 0, 0, inputs, outputs, cu_d_randomNum, ImgSize, stdev); hipStreamSynchronize(0); getLastCudaError("g_applyWhiteNoise"); }
eb8f8f8375cb664df56646af7bbfc99067492e35.cu
#include "cuTrasformation.cuh" #include <math.h> #include <stdio.h> #include "../common/cuMatrix.h" #include "../common/util.h" #include "cuda_runtime.h" #include <curand_kernel.h> #include <time.h> #include "../common/Config.h" #include <helper_functions.h> #include <helper_cuda.h> #include "../common/cuBase.h" #define GAUSSIAN_FIELD_SIZE (21) /* strictly odd number */ #define constDistortion (1.0) curandGenerator_t rand_generator_device; const curandRngType_t generator_type = CURAND_RNG_PSEUDO_DEFAULT; cuMatrix<float>* cuGaussianKernel; cuMatrix<float>* cuDispH; cuMatrix<float>* cuDispV; float * cu_d_randonNumf; float* cu_d_randomNum; float* cu_h_randomNum; float dElasticSigma = 4.0; /* higher numbers are more smooth and less distorted; Simard uses 4.0*/ int getRandomNumLen(int batch, int ImgSize) { return batch * ImgSize * ImgSize * 2 * Config::instance()->getChannels(); } /* * blocks : dim3(1) * threads: dim3(GAUSSIAN_FIELD_SIZE*GAUSSIAN_FIELD_SIZE) */ __global__ void g_createGaussianKernel(float* gaussian, float dElasticSigma) { int iiMid = GAUSSIAN_FIELD_SIZE >> 1; float floatElasticSigma = dElasticSigma * dElasticSigma; int row = threadIdx.x % GAUSSIAN_FIELD_SIZE; int col = threadIdx.x / GAUSSIAN_FIELD_SIZE; float val1 = 1.0 / (dElasticSigma * 2.0 * 3.1415926535897932384626433832795); float val2 = (row-iiMid)*(row-iiMid) + (col-iiMid)*(col-iiMid); gaussian[threadIdx.x] = val1 * exp(-1.0 * val2 / (2.0 * floatElasticSigma)); } void cuInitDistortionMemery(int batch, int ImgSize) { curandStatus_t curandstatus; cuGaussianKernel = new cuMatrix<float>(GAUSSIAN_FIELD_SIZE, GAUSSIAN_FIELD_SIZE, 1); if(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE > MAX_THREADS) { char logStr[1024]; sprintf(logStr, "g_createGaussianKernel > MAX_THREADS\n"); LOG(logStr, "Result/log.txt"); exit(0); } g_createGaussianKernel<<<dim3(1),dim3(GAUSSIAN_FIELD_SIZE * GAUSSIAN_FIELD_SIZE)>>>( cuGaussianKernel->getDev(), dElasticSigma); cudaStreamSynchronize(0); /*cu_d_randomNum*/ checkCudaErrors( 
MemoryMonitor::instance()->gpuMalloc((void**)&cu_d_randomNum, sizeof(float) * getRandomNumLen(batch, ImgSize)) ); /*cu_d_randonNumf*/ checkCudaErrors( MemoryMonitor::instance()->gpuMalloc((void**)&cu_d_randonNumf, sizeof(float) * getRandomNumLen(batch, ImgSize)) ); /*cu_h_randomNum*/ cu_h_randomNum = (float*)MemoryMonitor::instance()->cpuMalloc(sizeof(float) * getRandomNumLen(batch, ImgSize)); if(!cu_h_randomNum) { char logStr[1024]; sprintf(logStr, "malloc cu_h_randomNum fail\n"); LOG(logStr, "Result/log.txt"); exit(0); } /*curandCreateGenerator*/ curandstatus = curandCreateGenerator(&rand_generator_device, generator_type); if(curandstatus != CURAND_STATUS_SUCCESS) { char logStr[1024]; sprintf(logStr, "curandCreateGenerator fail\n"); LOG(logStr, "Result/log.txt"); exit(0); } cuDispV = new cuMatrix<float>(batch, ImgSize * ImgSize, 1); cuDispH = new cuMatrix<float>(batch, ImgSize * ImgSize, 1); } __global__ void g_getRandomUniform(float* r1, float* r2, int len) { for(int i = 0; i < len; i += gridDim.x * blockDim.x) { int id = i + blockDim.x * blockIdx.x + threadIdx.x; if(id < len) { r2[id] = r1[id] * 2.0f - 1.0f; } } } /* * blocks : dim3(batch) * threads : dim3(512) */ __global__ void g_generateDistortionMap( float* _dispH, float* _dispV, float* rand, float* gaussianKernel, float dElasticScaling, float dMaxScaling, float dMaxRotation, int ImgSize) { int ImgSize2 = ImgSize * ImgSize; float* uniformH = rand + blockIdx.x * ImgSize2; float* uniformV = rand + blockIdx.x * ImgSize2 * 2; float* dispH = _dispH + ImgSize2 * blockIdx.x; float* dispV = _dispV + ImgSize2 * blockIdx.x; if(dElasticScaling >= 0.1){ for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; int iiMid = GAUSSIAN_FIELD_SIZE / 2; float fConvolvedH = 0.0; float fConvolvedV = 0.0; float fSampleH, fSampleV; float elasticScale = dElasticScaling; for(int xxx = 0; xxx < GAUSSIAN_FIELD_SIZE; ++xxx) { for(int yyy = 0; 
yyy < GAUSSIAN_FIELD_SIZE; ++yyy) { int xxxDisp = col - iiMid + xxx; int yyyDisp = row - iiMid + yyy; if(xxxDisp < 0 || xxxDisp >= ImgSize || yyyDisp < 0 || yyyDisp >= ImgSize) { fSampleH = 0.0; fSampleV = 0.0; } else { fSampleH = uniformH[yyyDisp * ImgSize + xxxDisp]; fSampleV = uniformV[yyyDisp * ImgSize + xxxDisp]; } fConvolvedH += fSampleH * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx] * constDistortion; fConvolvedV += fSampleV * gaussianKernel[yyy * GAUSSIAN_FIELD_SIZE + xxx] * constDistortion; } } dispH[idx] = elasticScale * fConvolvedH; dispV[idx] = elasticScale * fConvolvedV; } } } else{ for(int is = 0; is < ImgSize2; is += blockDim.x){ int idx = is + threadIdx.x; if(idx < ImgSize2){ dispH[idx] = 0.0; dispV[idx] = 0.0; } } } __syncthreads(); float rand1 = rand[blockIdx.x]; float rand2 = rand[blockIdx.x + 1]; if(fabs(dMaxRotation) >= 0.01){ if(rand1 <= 0.0) rand1 = 0.0; if(rand2 <= 0.0) rand2 = 0.0; } for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float dSFHoriz = dMaxScaling / 100.0 * rand1; float dSFVert = dMaxScaling / 100.0 * rand2; int iMid = ImgSize / 2; dispH[idx] += dSFHoriz * (col - iMid); dispV[idx] += dSFVert * (row - iMid); } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float angle = dMaxRotation * rand[blockIdx.x]; //printf("%f\n",angle); angle = angle * 3.1415926535897932384626433832795 / 180.0; float cosAngle = cos(angle); float sinAngle = sin(angle); int iMid = ImgSize / 2; float xx = row - iMid; float yy = col - iMid; dispH[idx] += yy - yy * cosAngle - xx * sinAngle; dispV[idx] += xx - xx * cosAngle + yy * sinAngle; } } } __global__ void g_scaleAndRotate( float* _dispH, float* _dispV, float scalingx, float scalingy, float rotation, int ImgSize) { int ImgSize2 = ImgSize * ImgSize; float* dispH = _dispH + ImgSize2 * 
blockIdx.x; float* dispV = _dispV + ImgSize2 * blockIdx.x; for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { dispH[idx] = 0.0; dispV[idx] = 0.0; } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float dSFHoriz = scalingx / 100.0; float dSFVert = scalingy / 100.0; int iMid = ImgSize / 2; dispH[idx] += dSFHoriz * (col - iMid); dispV[idx] += dSFVert * (row - iMid); } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float angle = rotation; angle = angle * 3.1415926535897932384626433832795 / 180.0; float cosAngle = cos(angle); float sinAngle = sin(angle); int iMid = ImgSize / 2; float xx = row - iMid; float yy = col - iMid; dispH[idx] += yy - yy * cosAngle - xx * sinAngle; dispV[idx] += xx - xx * cosAngle + yy * sinAngle; } } } /* * blocks : dim3(batch, Config::instance()->getChannels()) * threads: dim3(min(512, ImgSize * ImgSize)) */ __global__ void g_applyDistortionMap( float** _inputs, float** _outputs, float* _dispH, float* _dispV, int ImgSize) { extern __shared__ float img[]; int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize; float* input = _inputs[blockIdx.x] + ImgSize2 * c; float* output= _outputs[blockIdx.x]+ ImgSize2 * c; float* dispV = _dispV + blockIdx.x * ImgSize2; float* dispH = _dispH + blockIdx.x * ImgSize2; for(int is = 0; is < ImgSize2; is += blockDim.x){ int idx = is + threadIdx.x; if(idx < ImgSize2){ img[idx] = input[idx]; } } __syncthreads(); for(int is = 0; is < ImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < ImgSize2) { int row = idx / ImgSize; int col = idx % ImgSize; float sourceRow, sourceCol; float fracRow, fracCol; float w1, w2, w3, w4; float sourceValue; int sRow, sCol, sRowp1, sColp1; bool bSkipOutOfBounds; if(fabs(dispV[idx]) < 
0.000000001 && fabs(dispH[idx]) < 0.0000000001) { output[idx] = input[idx]; continue; } sourceRow = (float)row - dispV[idx]; sourceCol = (float)col - dispH[idx]; fracRow = sourceRow - (int)sourceRow; fracCol = sourceCol - (int)sourceCol; w1 = ( 1.0 - fracRow ) * ( 1.0 - fracCol ); w2 = ( 1.0 - fracRow ) * fracCol; w3 = fracRow * ( 1.0 - fracCol ); w4 = fracRow * fracCol; bSkipOutOfBounds = false; if ( ((int)sourceRow + 1) >= ImgSize ) bSkipOutOfBounds = true; if ( (int)sourceRow < 0 ) bSkipOutOfBounds = true; if ( ((int)sourceCol + 1) >= ImgSize ) bSkipOutOfBounds = true; if ( (int)sourceCol < 0 ) bSkipOutOfBounds = true; if ( bSkipOutOfBounds == false ) { sRow = (int)sourceRow; sCol = (int)sourceCol; sRowp1 = sRow + 1; sColp1 = sCol + 1; while (sRowp1 >= ImgSize) sRowp1 -= ImgSize; while (sRowp1 < 0) sRowp1 += ImgSize; while (sColp1 >= ImgSize) sColp1 -= ImgSize; while (sColp1 < 0) sColp1 += ImgSize; while (sRow >= ImgSize) sRow -= ImgSize; while (sRow < 0) sRow += ImgSize; while (sCol >= ImgSize) sCol -= ImgSize; while (sCol < 0) sCol += ImgSize; sourceValue = w1 * img[sRow * ImgSize + sCol] + w2 * img[sRow * ImgSize + sColp1] + w3 * img[sRowp1 * ImgSize + sCol] + w4 * img[sRowp1 * ImgSize + sColp1]; } else { sourceValue = -1.0; } output[idx] = sourceValue; } } } void cuApplyRandom(int batch, unsigned long long s, int ImgSize) { curandStatus_t curandStatus; unsigned long long seed = s; curandStatus = curandSetPseudoRandomGeneratorSeed(rand_generator_device, seed); if(curandStatus != CURAND_STATUS_SUCCESS) { char logStr[1024]; sprintf(logStr, "curandSetPseudoRandomGeneratorSeed fail\n"); LOG(logStr, "Result/log.txt"); exit(0); } curandGenerateUniform(rand_generator_device, cu_d_randonNumf, getRandomNumLen(batch, ImgSize)); g_getRandomUniform<<<dim3(256),dim3(256)>>>(cu_d_randonNumf, cu_d_randomNum, getRandomNumLen(batch, ImgSize)); cudaStreamSynchronize(0); getLastCudaError("g_getRandomUniform"); int threads = min(512, ImgSize * ImgSize); 
g_generateDistortionMap<<<dim3(batch),threads>>>(cuDispH->getDev(), cuDispV->getDev(), cu_d_randomNum, cuGaussianKernel->getDev(), Config::instance()->getDistortion(), Config::instance()->getScale(), Config::instance()->getRotation(), ImgSize); cudaStreamSynchronize(0); getLastCudaError("g_generateDistortionMap"); } void cuApplyScaleAndRotate(int batch, int ImgSize, float scalingx, float scalingy, float rotation) { g_scaleAndRotate<<<dim3(batch),dim3(512)>>>( cuDispH->getDev(), cuDispV->getDev(), scalingx, scalingy, rotation, ImgSize); cudaStreamSynchronize(0); getLastCudaError("g_generateDistortionMap"); } void cuApplyDistortion(float**inputs, float**outputs, int batch, int ImgSize) { int threadidx = min(ImgSize * ImgSize, 512); g_applyDistortionMap<<<dim3(batch, Config::instance()->getChannels()), dim3(threadidx), sizeof(float) * ImgSize * ImgSize>>>(inputs, outputs, cuDispH->getDev(), cuDispV->getDev(), ImgSize); cudaStreamSynchronize(0); getLastCudaError("g_applyDistortionMap"); } /* * blocks : dim3(batch, channels) * threads : dim3(min(ImgSize*ImgSize, 512)) */ __global__ void g_applyCropRandom(float**_inputs, float**_outputs, float* random, int crop, int ImgSize) { int c = blockIdx.y; int outputImgSize = ImgSize; int inputImgSize = ImgSize + crop; int inputImgSize2 = inputImgSize * inputImgSize; int outputImgSize2= outputImgSize* outputImgSize; float* input = _inputs [blockIdx.x] + c * inputImgSize2; float* output= _outputs[blockIdx.x] + c * outputImgSize2; int sx =(int)((random[blockIdx.x] + 1.0) * 0.5 * crop); int sy =(int)((random[blockIdx.x + 1] + 1.0) * 0.5 * crop); if(sx > crop) sx = crop; if(sy > crop) sy = crop; if(sx < 0) sx = 0; if(sy < 0) sy = 0; // if(threadIdx.x == 0) // sprintf(logStr, "%d %d\n", sx, sy); for(int is = 0; is < outputImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < outputImgSize2) { int ox = idx / outputImgSize; int oy = idx % outputImgSize; int ix = ox + sx; int iy = oy + sy; cuAssert(ix < inputImgSize && iy < 
inputImgSize); output[idx] = input[ix * inputImgSize + iy]; } } } /* * blocks : dim3(batch, channels) * threads: dim3(min(ImgSize * ImgSize, 512); */ __global__ void g_applyCrop(float**_inputs, float**_outputs, float* random, int croplen, int ImgSize, int cropr, int cropc) { int c = blockIdx.y; int outputImgSize = ImgSize; int inputImgSize = ImgSize + croplen; int inputImgSize2 = inputImgSize * inputImgSize; int outputImgSize2= outputImgSize* outputImgSize; float* input = _inputs [blockIdx.x]+ c * inputImgSize2 ; float* output= _outputs[blockIdx.x]+ c * outputImgSize2; int sx = cropr; int sy = cropc; for(int is = 0; is < outputImgSize2; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < outputImgSize2) { int ox = idx / outputImgSize; int oy = idx % outputImgSize; int ix = ox + sx; int iy = oy + sy; cuAssert(ix < inputImgSize && iy < inputImgSize); output[idx] = input[ix * inputImgSize + iy]; } } } void cuApplyCropRandom(float**inputs, float**outputs, int batch, int ImgSize) { dim3 block = dim3(batch, Config::instance()->getChannels()); dim3 threads = min(512, ImgSize * ImgSize); g_applyCropRandom<<<block,threads>>>(inputs, outputs, cu_d_randomNum, Config::instance()->getCrop(), ImgSize); cudaStreamSynchronize(0); getLastCudaError("g_applyCropRandom"); } void cuApplyCrop(float**inputs, float**outputs, int batch, int ImgSize, int cropr, int cropc) { int threads = min(512, ImgSize * ImgSize); g_applyCrop<<<dim3(batch, Config::instance()->getChannels()), dim3(threads)>>>(inputs, outputs,cu_d_randomNum, Config::instance()->getCrop(), ImgSize, cropr, cropc); cudaStreamSynchronize(0); getLastCudaError("g_applyCrop"); } /* * function: orizontal Reflection * blocks : dim3(batch, Config::instance()->getChannels()), * threads : dim3(threads) * flag : 0. Random * 1. Horizontal * 2. 
Not Horizontal */ __global__ void g_applyHorizontal(float**_inputs, float**_outputs, float* rand, int ImgSize, int flag) { int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize ; float* input = _inputs[blockIdx.x] + c * ImgSize2; float* output= _outputs[blockIdx.x]+ c * ImgSize2; int half = ImgSize / 2; for(int is = 0; is < half * ImgSize; is += blockDim.x) { int idx = is + threadIdx.x; if(idx < half * ImgSize) { int ox = idx / half; int oy = idx % half; int ix = ox; int iy = ImgSize - oy - 1; if(flag == RANDOM_HORIZONTAL) { //if(rand[blockIdx.x] <= 0.0){ if(blockIdx.x % 2 == 0){ cuAssert(ix < ImgSize && iy < ImgSize); swap(output[ox * ImgSize + oy], input[ix * ImgSize + iy]); } } else if(flag == HORIZONTAL){ cuAssert(ix < ImgSize && iy < ImgSize); swap(output[ox * ImgSize + oy], input[ix * ImgSize + iy]); } else if(flag == NOT_HORIZONTAL){ } } } } /* * flag : 0. Random * 1. Horizontal * 2. Not Horizontal */ void cuApplyHorizontal(float **inputs, float**outputs, int batch, int ImgSize, int flag) { int threads = std::min(ImgSize * ImgSize / 2, 512); g_applyHorizontal<<<dim3(batch, Config::instance()->getChannels()), dim3(threads)>>>(inputs, outputs, cu_d_randomNum, ImgSize, flag); cudaStreamSynchronize(0); getLastCudaError("g_applyHorizontal"); } __global__ void g_applyWhiteNoise( float** _inputs, float ** _outputs, float * _random, int ImgSize, float stdev){ int s = blockIdx.x; int c = blockIdx.y; int ImgSize2 = ImgSize * ImgSize; int offset = ImgSize2 * c; float* input = _inputs [s] + offset; float* output= _outputs[s] + offset; float* rand = _random + offset; if(true){ for(int i = 0; i < ImgSize2; i += blockDim.x){ int idx = i + threadIdx.x; if(idx < ImgSize2){ float val = input[idx] + stdev * rand[idx]; output[idx] = val; } } }else{ for(int i = 0; i < ImgSize2; i += blockDim.x){ int idx = i + threadIdx.x; if(idx < ImgSize2){ output[idx] = input[idx]; } } } } /* ref: http://en.wikipedia.org/wiki/White_noise */ void cuApplyWhiteNoise(float **inputs, float**outputs, 
int batch, int ImgSize, float stdev) { dim3 blocks = dim3(batch, Config::instance()->getChannels()); dim3 threads = dim3(min(ImgSize * ImgSize, 512)); g_applyWhiteNoise<<<blocks, threads>>>(inputs, outputs, cu_d_randomNum, ImgSize, stdev); cudaStreamSynchronize(0); getLastCudaError("g_applyWhiteNoise"); }
e01b7297a01c2b60993c691acd6c4a0949bf6fbb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/hip/HIPContext.h> #include <c10/util/Exception.h> #include <THH/THHDeviceUtils.cuh> #include <THH/THHTensorMathReduce.cuh> #include <THH/THHTensorSort.cuh> #include <THH/THHThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <ATen/native/hip/EmbeddingBackwardKernel.cuh> namespace at { namespace native { namespace { #ifdef __HIP_PLATFORM_HCC__ static const int WARP_SIZE = 64; static const int BLOCKDIMY = 16; #else static const int WARP_SIZE = 32; static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t> __global__ void embedding_backward_feature_kernel (int64_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? 
batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t> __global__ void embedding_backward_kernel( int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t> __global__ void renorm_kernel( scalar_t* weights, int64_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v += std::abs(x); } else if 
(norm_type == 2) { v += x * x; } else { v += ::pow(x, norm_type); } } using Op = ReduceAdd<accscalar_t>; v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0); if (tid == 0) { sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarType("embedding_backward", indices_arg, kLong); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (num_indices <= 768 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE)); dim3 block(WARP_SIZE, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND_HALF (grad.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( embedding_backward_feature_kernel<scalar_t, accscalar_t>) , dim3(grid), dim3(block), sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY, stream, indices_contig.data<int64_t>(), grad.data<scalar_t>(), grad_weight.data<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); }); THCudaCheck(hipGetLastError()); return grad_weight; } auto sorted_indices = at::empty_like(indices); auto orig_indices = 
at::empty_like(indices); using device_ptr = thrust::device_ptr<int64_t>; // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { sorted_indices.copy_(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort; a stable sort is not required auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>()); } Tensor count; if (scale_grad_by_freq) { count = at::empty_like(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); auto count_data = device_ptr(count.data<int64_t>()); thrust::inclusive_scan_by_key( policy, sorted_data, sorted_data + num_indices, thrust::make_constant_iterator(1), count_data ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::equal_to<int64_t>(), thrust::maximum<int64_t>() ); } return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx); } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, 
double max_norm, double norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); using device_ptr = thrust::device_ptr<int64_t>; auto num_indices = indices.numel(); auto indices_contig = indices.contiguous(); auto indices_data = device_ptr(indices_contig.data<int64_t>()); // FIXME: thrust::unique only removes consecutive elements that are equal. // We have race conditions when indices contain duplicates which are not // adjacent auto unique_indices = at::empty(indices.numel(), indices.options()); auto unique_data = device_ptr(unique_indices.data<int64_t>()); auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data); auto num_unique_indices = static_cast<int>(end - unique_data); dim3 grid(num_unique_indices); dim3 block(128); int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream, self.data<scalar_t>(), unique_indices.data<int64_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1)); }); THCudaCheck(hipGetLastError()); return self; } }} // namespace at::native
e01b7297a01c2b60993c691acd6c4a0949bf6fbb.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/CUDAContext.h> #include <c10/util/Exception.h> #include <THC/THCDeviceUtils.cuh> #include <THC/THCTensorMathReduce.cuh> #include <THC/THCTensorSort.cuh> #include <THC/THCThrustAllocator.cuh> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <ATen/native/cuda/EmbeddingBackwardKernel.cuh> namespace at { namespace native { namespace { #ifdef __HIP_PLATFORM_HCC__ static const int WARP_SIZE = 64; static const int BLOCKDIMY = 16; #else static const int WARP_SIZE = 32; static const int BLOCKDIMY = 32; #endif template <typename scalar_t, typename accscalar_t> __global__ void embedding_backward_feature_kernel (int64_t* indices, const scalar_t* __restrict__ grad, scalar_t* __restrict__ grad_weight, int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot int64_t stride, int padding_idx) { extern __shared__ char buf[]; accscalar_t* smem = (accscalar_t*)buf; accscalar_t* my_s = smem + WARP_SIZE*threadIdx.y; int* indices_batch = (int*)(buf + sizeof(accscalar_t)*WARP_SIZE*blockDim.y); const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y) { // Entire block cooperates to load a batch of 1024 indices to process int tid = threadIdx.x + threadIdx.y*blockDim.x; if(batch_start + tid < n) indices_batch[tid] = (int)indices[batch_start + tid]; int batch_end = batch_start + blockDim.x*blockDim.y < n ? 
batch_start + blockDim.x*blockDim.y : n; // Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32 for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y) { // This does double duty: it makes sure indices_batch is ready, and it makes sure match-group // leaders are done with their accumulates before other warps start loading again. __syncthreads(); int n_this_chunk = (batch_end - chunk_start) < blockDim.y ? (batch_end - chunk_start) : blockDim.y; int src_row = chunk_start + threadIdx.y; int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight // All warps load their smem segments with incoming grad data if(src_row < n && f < s && dst_row != padding_idx) my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]); __syncthreads(); // To ensure determinism, we can't just have each warp add its grad data to its dst_row. // We need to check if any other warps pulled grad data targeting dst_row. // If so, we elect the first warp in each matching group as the leader. // Each leader warp serializes the accumulates targeting dst_row in shared memory, // then finishes by adding the accumulated buffer to dst_row in grad_weight. 
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync { int match_found_this_thread = (dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]); if(threadIdx.x >= n_this_chunk) match_found_this_thread = 0; #ifdef __HIP_PLATFORM_HCC__ unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffsll(matchmask) - 1; #else unsigned int matchmask = WARP_BALLOT(match_found_this_thread); int first_remaining_peer = __ffs(matchmask) - 1; #endif if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader { matchmask ^= (1 << first_remaining_peer); while(matchmask) { #ifdef __HIP_PLATFORM_HCC__ first_remaining_peer = __ffsll(matchmask) - 1; #else first_remaining_peer = __ffs(matchmask) - 1; #endif my_s[threadIdx.x] += smem[threadIdx.x + WARP_SIZE*first_remaining_peer]; matchmask ^= (1 << first_remaining_peer); } if(f < s) grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]); } } } } } template <typename scalar_t> __global__ void embedding_backward_kernel( int64_t* input, int64_t* indices, scalar_t* grad_output, scalar_t* grad_weight, int64_t* count, int64_t numel, int64_t stride, int padding_idx) { using accscalar_t = acc_type<scalar_t, true>; int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1]) && input[idx] != padding_idx) { do { const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weight_row = ((int) input[idx]) * stride; const int grad_row = ((int) indices[idx]) * stride; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; accscalar_t gradient[SZ]; accscalar_t weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * WARP_SIZE; if (feature_dim < stride) { gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]); weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int feature_dim = start_feature + ii * WARP_SIZE; if (feature_dim < stride) { grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } /* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */ template <typename scalar_t, typename accscalar_t> __global__ void renorm_kernel( scalar_t* weights, int64_t* indices, accscalar_t max_norm, accscalar_t norm_type, int64_t dim, int64_t weights_stride0, int64_t weights_stride1) { // Some casting hacks since dynamic shared memory and templates don't work together: extern __shared__ unsigned char smem[]; auto sdata = reinterpret_cast<accscalar_t*>(smem); int tid = threadIdx.x; int base_index = indices[blockIdx.x] * weights_stride0; accscalar_t v = 0; for (int i = tid; i < dim; i += blockDim.x) { auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]); if (norm_type == 1) { v += std::abs(x); } else if 
(norm_type == 2) { v += x * x; } else { v += std::pow(x, norm_type); } } using Op = ReduceAdd<accscalar_t>; v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0); if (tid == 0) { sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type)); } __syncthreads(); // now we renormalize the blocks that need it if (sdata[0] > max_norm) { auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7)); for (int i = tid; i < dim; i += blockDim.x) { weights[base_index + i * weights_stride1] *= factor; } } } } // anonymous namespace Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) { auto grad_arg = TensorArg(grad_, "grad", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkScalarType("embedding_backward", indices_arg, kLong); checkSameGPU("embedding_backward", grad_arg, indices_arg); auto num_indices = indices.numel(); auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)}); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (num_indices <= 768 && !scale_grad_by_freq) { auto indices_contig = indices.contiguous(); auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options()); int64_t stride = grad_weight.stride(0); dim3 grid(THCCeilDiv(stride, (int64_t)WARP_SIZE)); dim3 block(WARP_SIZE, BLOCKDIMY); AT_DISPATCH_FLOATING_TYPES_AND_HALF (grad.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; embedding_backward_feature_kernel<scalar_t, accscalar_t> <<<grid, block, sizeof(accscalar_t)*WARP_SIZE*BLOCKDIMY + sizeof(int)*WARP_SIZE*BLOCKDIMY, stream>>> (indices_contig.data<int64_t>(), grad.data<scalar_t>(), grad_weight.data<scalar_t>(), static_cast<int>(num_indices), static_cast<int64_t>(stride), static_cast<int>(padding_idx)); }); THCudaCheck(cudaGetLastError()); return grad_weight; } auto sorted_indices = at::empty_like(indices); auto orig_indices = at::empty_like(indices); using 
device_ptr = thrust::device_ptr<int64_t>; // Sort the inputs into sorted with the corresponding indices; we // don't need a stable or multidimensional sort, so just use Thrust // directly { sorted_indices.copy_(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Fill sortedOrigIndices with sequential indices auto count_iter = thrust::counting_iterator<int64_t>(0); auto orig_data = device_ptr(orig_indices.data<int64_t>()); thrust::copy(policy, count_iter, count_iter + num_indices, orig_data); // Sort; a stable sort is not required auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); thrust::sort_by_key(policy, sorted_data, sorted_data + num_indices, orig_data, ThrustLTOp<int64_t>()); } Tensor count; if (scale_grad_by_freq) { count = at::empty_like(indices); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Compute an increasing sequence per unique item in sortedIndices: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 1 2 3 1 2 1 1 2 auto sorted_data = device_ptr(sorted_indices.data<int64_t>()); auto count_data = device_ptr(count.data<int64_t>()); thrust::inclusive_scan_by_key( policy, sorted_data, sorted_data + num_indices, thrust::make_constant_iterator(1), count_data ); // Take the maximum of each count per unique key in reverse: // sorted: 2 5 5 5 7 7 8 9 9 // count: 1 3 3 3 2 2 1 2 2 thrust::inclusive_scan_by_key( policy, thrust::make_reverse_iterator(sorted_data + num_indices), thrust::make_reverse_iterator(sorted_data), thrust::make_reverse_iterator(count_data + num_indices), thrust::make_reverse_iterator(count_data + num_indices), thrust::equal_to<int64_t>(), thrust::maximum<int64_t>() ); } return embedding_backward_cuda_kernel(grad, orig_indices, sorted_indices, count, num_weights, padding_idx); } Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices, double max_norm, double 
norm_type) { auto self_arg = TensorArg(self, "self", 1); auto indices_arg = TensorArg(indices, "indices", 1); checkDim("embedding_renorm_", self_arg, 2); checkSameGPU("embedding_renorm", self_arg, indices_arg); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); using device_ptr = thrust::device_ptr<int64_t>; auto num_indices = indices.numel(); auto indices_contig = indices.contiguous(); auto indices_data = device_ptr(indices_contig.data<int64_t>()); // FIXME: thrust::unique only removes consecutive elements that are equal. // We have race conditions when indices contain duplicates which are not // adjacent auto unique_indices = at::empty(indices.numel(), indices.options()); auto unique_data = device_ptr(unique_indices.data<int64_t>()); auto end = thrust::unique_copy(policy, indices_data, indices_data + num_indices, unique_data); auto num_unique_indices = static_cast<int>(end - unique_data); dim3 grid(num_unique_indices); dim3 block(128); int dim = self.stride(0); AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "embedding_backward", [&] { using accscalar_t = acc_type<scalar_t, true>; renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>( self.data<scalar_t>(), unique_indices.data<int64_t>(), static_cast<accscalar_t>(max_norm), static_cast<accscalar_t>(norm_type), dim, self.stride(0), self.stride(1)); }); THCudaCheck(cudaGetLastError()); return self; } }} // namespace at::native
69187574936bb4457ac5a1f84990ca802a2f9644.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include <c10/core/impl/DeviceGuardImplInterface.h> #include <c10/macros/Macros.h> #include <c10/util/Exception.h> #include <c10/hip/HIPException.h> #include <c10/hip/HIPStream.h> #include <c10/hip/HIPFunctions.h> //#include "hip/hcc_detail/hip_atomic.h" /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu #include <ATen/ATen.h> //#include <ATen/hip/HIPContext.h> //#include <THH/THHAtomics.cuh> #include <stdio.h> #include <math.h> #include <float.h> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int 
channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const scalar_t map_h = i * dilation_h + offset_h; //const scalar_t map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, 
cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high 
<= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); hipLaunchKernelGGL(deformable_im2col_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_im_, data_offset_, height, width, ksize_h, 
ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t *data_col, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w 
+ offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); hipLaunchKernelGGL(deformable_col2im_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, 
height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_col2im: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / 
kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; AT_DISPATCH_FLOATING_TYPES( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); 
hipLaunchKernelGGL(deformable_col2im_coord_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == 
argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void 
modulated_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const 
scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * 
stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int 
dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset, scalar_t *grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + 
h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t 
*data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); hipLaunchKernelGGL(modulated_deformable_im2col_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); hipLaunchKernelGGL(modulated_deformable_col2im_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, 
dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; AT_DISPATCH_FLOATING_TYPES( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); hipLaunchKernelGGL(modulated_deformable_col2im_coord_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in 
modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } }
69187574936bb4457ac5a1f84990ca802a2f9644.cu
#include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include <c10/core/impl/DeviceGuardImplInterface.h> #include <c10/macros/Macros.h> #include <c10/util/Exception.h> #include <c10/hip/HIPException.h> #include <c10/hip/HIPStream.h> #include <c10/hip/HIPFunctions.h> //#include "hip/hcc_detail/hip_atomic.h" /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer **************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ******************** * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ // modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu #include <ATen/ATen.h> //#include <ATen/cuda/HIPContext.h> //#include <THC/THCAtomics.cuh> #include <stdio.h> #include <math.h> #include <float.h> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int 
channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const scalar_t map_h = i * dilation_h + offset_h; //const scalar_t map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = deformable_im2col_bilinear(data_im_ptr, width, 
cur_height, cur_width, map_h, map_w); val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high 
<= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES( data_im.scalar_type(), "deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); hipLaunchKernelGGL(deformable_im2col_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_im_, data_offset_, height, width, ksize_h, 
ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t *data_col, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w 
+ offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; AT_DISPATCH_FLOATING_TYPES( data_col.scalar_type(), "deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); hipLaunchKernelGGL(deformable_col2im_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, 
height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_col2im: %s\n", hipGetErrorString(err)); } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / 
kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; AT_DISPATCH_FLOATING_TYPES( data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); 
hipLaunchKernelGGL(deformable_col2im_coord_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == 
argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void 
modulated_deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const 
scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; //data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * 
stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col, const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int 
dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t *grad_offset, scalar_t *grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + 
h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t 
*data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); hipLaunchKernelGGL(modulated_deformable_im2col_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); hipLaunchKernelGGL(modulated_deformable_col2im_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, 
dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; AT_DISPATCH_FLOATING_TYPES( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); hipLaunchKernelGGL(modulated_deformable_col2im_coord_gpu_kernel, dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStream(), num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in 
modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } }
e63f02d6d38fb9f0ce4d07727a5224de2427a14e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "dreyfus-wagner.h" #include <chrono> #include <iostream> #include <stdint.h> #include <vector> static int hostTable[(int)1e8]; static const int INF = 1e9; static const int MAX_THREADS = 1024; static __global__ void dreyfusWagnerFirstStep(const int *distances, int *dynamicTable, int *masks, const int nodes, const int masksStart, const int masksEnd) { int maskIndex = threadIdx.x, nodeIndex = blockIdx.x, tmp; if (maskIndex >= masksEnd - masksStart) return; int mask = masks[masksStart + maskIndex]; for (int subMask = (mask - 1) & mask; subMask; subMask = (subMask - 1) & mask) { tmp = dynamicTable[subMask * nodes + nodeIndex] + dynamicTable[(mask ^ subMask) * nodes + nodeIndex]; dynamicTable[mask * nodes + nodeIndex] = tmp < dynamicTable[mask * nodes + nodeIndex] ? tmp : dynamicTable[mask * nodes + nodeIndex]; } } static __global__ void dreyfusWagnerSecondStep(const int *distances, int *dynamicTable, int *masks, const int nodes, const int masksStart, const int masksEnd) { int maskIndex = threadIdx.x, nodeIndex = blockIdx.x, tmp; if (maskIndex >= masksEnd - masksStart) return; int mask = masks[masksStart + maskIndex]; for (int node2 = 0; node2 < nodes; node2++) { tmp = dynamicTable[mask * nodes + node2] + distances[nodeIndex * nodes + node2]; dynamicTable[mask * nodes + nodeIndex] = tmp < dynamicTable[mask * nodes + nodeIndex] ? 
tmp : dynamicTable[mask * nodes + nodeIndex]; } } static int * copyDistancesToDevice(const std::vector<std::vector<int>> &distances) { for (int i = 0; i < distances.size(); i++) { for (int j = 0; j < distances.size(); j++) { hostTable[i * distances.size() + j] = distances[i][j]; } } int *cudaDistances; hipMalloc(&cudaDistances, distances.size() * distances.size() * sizeof(int)); hipMemcpy(cudaDistances, hostTable, distances.size() * distances.size() * sizeof(int), hipMemcpyHostToDevice); return cudaDistances; } static int * copyDynamicTableToDevice(const std::vector<std::vector<int>> &distances, const std::vector<int> &terminals, const int &fullMask) { for (int i = 0; i < (fullMask + 1); i++) { for (int j = 0; j < distances.size(); j++) { hostTable[i * distances.size() + j] = INF; } } for (int i = 0; i < terminals.size() - 1; i++) { hostTable[(1 << i) * distances.size() + terminals[i]] = 0; } int *cudaDynamicTable; hipMalloc(&cudaDynamicTable, (fullMask + 1) * distances.size() * sizeof(int)); hipMemcpy(cudaDynamicTable, hostTable, (fullMask + 1) * distances.size() * sizeof(int), hipMemcpyHostToDevice); return cudaDynamicTable; } static int *copyMasksToDevice(const int &terminals, const int &fullMask, std::vector<int> &masksBeginings) { std::vector<std::vector<int>> masks(terminals); for (int mask = 1; mask <= fullMask; mask++) { masks[__builtin_popcount(mask) - 1].push_back(mask); } int *cudaMasksTable; hipMalloc(&cudaMasksTable, (fullMask + 1) * sizeof(int)); for (int i = 0, j = 0; i < masks.size(); j += masks[i].size(), i++) { masksBeginings.push_back(j); hipMemcpy(cudaMasksTable + j, &masks[i][0], masks[i].size() * sizeof(int), hipMemcpyHostToDevice); } masksBeginings.push_back( masksBeginings[masksBeginings.size() - 1] + masks[masks.size() - 1].size()); // could be just +1 return cudaMasksTable; } DreyfusWagnerStatistics dreyfusWagner(std::vector<std::vector<int>> &distances, const std::vector<std::vector<std::pair<int, int>>> &graph, const std::vector<int> 
&terminals) { hipFree(NULL); DreyfusWagnerStatistics statistics = {0, 0, 0, 0, 0}; if (terminals.size() <= 1) { return statistics; } const int fullMask = (1 << (terminals.size() - 1)) - 1; std::vector<int> masksBeginings; auto beforeFloydWarshall = std::chrono::steady_clock::now(); compouteDistances(distances, graph); auto afterFloydWarshall = std::chrono::steady_clock::now(); statistics.distancesDuration = std::chrono::duration_cast<std::chrono::milliseconds>( afterFloydWarshall - beforeFloydWarshall) .count(); auto beforeCopy = std::chrono::steady_clock::now(); int *cudaDistances = copyDistancesToDevice(distances); int *cudaDynamicTable = copyDynamicTableToDevice(distances, terminals, fullMask); int *cudaMasksTable = copyMasksToDevice(terminals.size() - 1, fullMask, masksBeginings); auto afterCopy = std::chrono::steady_clock::now(); statistics.copyDuration = std::chrono::duration_cast<std::chrono::milliseconds>(afterCopy - beforeCopy) .count(); for (int maskSize = 1; maskSize < masksBeginings.size(); maskSize++) { for (int i = 0, block_size; i < masksBeginings[maskSize] - masksBeginings[maskSize - 1]; i += MAX_THREADS) { block_size = ::min( (masksBeginings[maskSize] - masksBeginings[maskSize - 1] - i), MAX_THREADS); hipLaunchKernelGGL(( dreyfusWagnerFirstStep), dim3(distances.size()), dim3(block_size), 0, 0, cudaDistances, cudaDynamicTable, cudaMasksTable, distances.size(), masksBeginings[maskSize - 1] + i, masksBeginings[maskSize] + i); hipLaunchKernelGGL(( dreyfusWagnerSecondStep), dim3(distances.size()), dim3(block_size), 0, 0, cudaDistances, cudaDynamicTable, cudaMasksTable, distances.size(), masksBeginings[maskSize - 1] + i, masksBeginings[maskSize] + i); } } hipMemcpy(&statistics.result, cudaDynamicTable + (fullMask * distances.size() + terminals.back()), 1 * sizeof(int), hipMemcpyDeviceToHost); auto end = std::chrono::steady_clock::now(); statistics.dreyfusWagnerDuration = std::chrono::duration_cast<std::chrono::milliseconds>(end - afterCopy) .count(); 
statistics.everythingDuration = std::chrono::duration_cast<std::chrono::milliseconds>( end - beforeFloydWarshall) .count(); hipFree(cudaDistances); hipFree(cudaDynamicTable); hipFree(cudaMasksTable); return statistics; }
e63f02d6d38fb9f0ce4d07727a5224de2427a14e.cu
#include "common.h" #include "dreyfus-wagner.h" #include <chrono> #include <iostream> #include <stdint.h> #include <vector> static int hostTable[(int)1e8]; static const int INF = 1e9; static const int MAX_THREADS = 1024; static __global__ void dreyfusWagnerFirstStep(const int *distances, int *dynamicTable, int *masks, const int nodes, const int masksStart, const int masksEnd) { int maskIndex = threadIdx.x, nodeIndex = blockIdx.x, tmp; if (maskIndex >= masksEnd - masksStart) return; int mask = masks[masksStart + maskIndex]; for (int subMask = (mask - 1) & mask; subMask; subMask = (subMask - 1) & mask) { tmp = dynamicTable[subMask * nodes + nodeIndex] + dynamicTable[(mask ^ subMask) * nodes + nodeIndex]; dynamicTable[mask * nodes + nodeIndex] = tmp < dynamicTable[mask * nodes + nodeIndex] ? tmp : dynamicTable[mask * nodes + nodeIndex]; } } static __global__ void dreyfusWagnerSecondStep(const int *distances, int *dynamicTable, int *masks, const int nodes, const int masksStart, const int masksEnd) { int maskIndex = threadIdx.x, nodeIndex = blockIdx.x, tmp; if (maskIndex >= masksEnd - masksStart) return; int mask = masks[masksStart + maskIndex]; for (int node2 = 0; node2 < nodes; node2++) { tmp = dynamicTable[mask * nodes + node2] + distances[nodeIndex * nodes + node2]; dynamicTable[mask * nodes + nodeIndex] = tmp < dynamicTable[mask * nodes + nodeIndex] ? 
tmp : dynamicTable[mask * nodes + nodeIndex]; } } static int * copyDistancesToDevice(const std::vector<std::vector<int>> &distances) { for (int i = 0; i < distances.size(); i++) { for (int j = 0; j < distances.size(); j++) { hostTable[i * distances.size() + j] = distances[i][j]; } } int *cudaDistances; cudaMalloc(&cudaDistances, distances.size() * distances.size() * sizeof(int)); cudaMemcpy(cudaDistances, hostTable, distances.size() * distances.size() * sizeof(int), cudaMemcpyHostToDevice); return cudaDistances; } static int * copyDynamicTableToDevice(const std::vector<std::vector<int>> &distances, const std::vector<int> &terminals, const int &fullMask) { for (int i = 0; i < (fullMask + 1); i++) { for (int j = 0; j < distances.size(); j++) { hostTable[i * distances.size() + j] = INF; } } for (int i = 0; i < terminals.size() - 1; i++) { hostTable[(1 << i) * distances.size() + terminals[i]] = 0; } int *cudaDynamicTable; cudaMalloc(&cudaDynamicTable, (fullMask + 1) * distances.size() * sizeof(int)); cudaMemcpy(cudaDynamicTable, hostTable, (fullMask + 1) * distances.size() * sizeof(int), cudaMemcpyHostToDevice); return cudaDynamicTable; } static int *copyMasksToDevice(const int &terminals, const int &fullMask, std::vector<int> &masksBeginings) { std::vector<std::vector<int>> masks(terminals); for (int mask = 1; mask <= fullMask; mask++) { masks[__builtin_popcount(mask) - 1].push_back(mask); } int *cudaMasksTable; cudaMalloc(&cudaMasksTable, (fullMask + 1) * sizeof(int)); for (int i = 0, j = 0; i < masks.size(); j += masks[i].size(), i++) { masksBeginings.push_back(j); cudaMemcpy(cudaMasksTable + j, &masks[i][0], masks[i].size() * sizeof(int), cudaMemcpyHostToDevice); } masksBeginings.push_back( masksBeginings[masksBeginings.size() - 1] + masks[masks.size() - 1].size()); // could be just +1 return cudaMasksTable; } DreyfusWagnerStatistics dreyfusWagner(std::vector<std::vector<int>> &distances, const std::vector<std::vector<std::pair<int, int>>> &graph, const 
std::vector<int> &terminals) { cudaFree(NULL); DreyfusWagnerStatistics statistics = {0, 0, 0, 0, 0}; if (terminals.size() <= 1) { return statistics; } const int fullMask = (1 << (terminals.size() - 1)) - 1; std::vector<int> masksBeginings; auto beforeFloydWarshall = std::chrono::steady_clock::now(); compouteDistances(distances, graph); auto afterFloydWarshall = std::chrono::steady_clock::now(); statistics.distancesDuration = std::chrono::duration_cast<std::chrono::milliseconds>( afterFloydWarshall - beforeFloydWarshall) .count(); auto beforeCopy = std::chrono::steady_clock::now(); int *cudaDistances = copyDistancesToDevice(distances); int *cudaDynamicTable = copyDynamicTableToDevice(distances, terminals, fullMask); int *cudaMasksTable = copyMasksToDevice(terminals.size() - 1, fullMask, masksBeginings); auto afterCopy = std::chrono::steady_clock::now(); statistics.copyDuration = std::chrono::duration_cast<std::chrono::milliseconds>(afterCopy - beforeCopy) .count(); for (int maskSize = 1; maskSize < masksBeginings.size(); maskSize++) { for (int i = 0, block_size; i < masksBeginings[maskSize] - masksBeginings[maskSize - 1]; i += MAX_THREADS) { block_size = std::min( (masksBeginings[maskSize] - masksBeginings[maskSize - 1] - i), MAX_THREADS); dreyfusWagnerFirstStep<<<distances.size(), block_size>>>( cudaDistances, cudaDynamicTable, cudaMasksTable, distances.size(), masksBeginings[maskSize - 1] + i, masksBeginings[maskSize] + i); dreyfusWagnerSecondStep<<<distances.size(), block_size>>>( cudaDistances, cudaDynamicTable, cudaMasksTable, distances.size(), masksBeginings[maskSize - 1] + i, masksBeginings[maskSize] + i); } } cudaMemcpy(&statistics.result, cudaDynamicTable + (fullMask * distances.size() + terminals.back()), 1 * sizeof(int), cudaMemcpyDeviceToHost); auto end = std::chrono::steady_clock::now(); statistics.dreyfusWagnerDuration = std::chrono::duration_cast<std::chrono::milliseconds>(end - afterCopy) .count(); statistics.everythingDuration = 
std::chrono::duration_cast<std::chrono::milliseconds>( end - beforeFloydWarshall) .count(); cudaFree(cudaDistances); cudaFree(cudaDynamicTable); cudaFree(cudaMasksTable); return statistics; }
b7e80d92d37a4e4d482e8fb0cdad91d7ca172e47.hip
// !!! This is a file automatically generated by hipify!!! /** * Yuri Gorokhov * lab 10 - Cuda Host Alloc */ #include <stdio.h> #include <hip/hip_runtime.h> #include <math.h> #include "../include/cuda_util.h" #define SIZE 10*1024*1024 #define ITERATIONS 100 float cuda_malloc_test(int size, bool up); float cuda_host_alloc_test(int size, bool up); int main(void) { float elapsedTime; elapsedTime = cuda_malloc_test(SIZE, true); printf("Time using hipMalloc (copy up): %f\n", elapsedTime); elapsedTime = cuda_malloc_test(SIZE, false); printf("Time using hipMalloc (copy down): %f\n", elapsedTime); elapsedTime = cuda_host_alloc_test(SIZE, true); printf("Time using hipHostMalloc (copy up): %f\n", elapsedTime); elapsedTime = cuda_host_alloc_test(SIZE, false); printf("Time using hipHostMalloc (copy down): %f\n", elapsedTime); } float cuda_malloc_test(int size, bool up) { hipEvent_t start, stop; int *a, *dev_a; float elapsedTime; hipEventCreate(&start); hipEventCreate(&stop); a = (int*)malloc(size*sizeof(*a)); hipMalloc((void**)&dev_a, size * sizeof(*dev_a)); hipEventRecord(start, 0); for(int i = 0; i < ITERATIONS; i++) { if (up) hipMemcpy( dev_a, a, size * sizeof( *dev_a ), hipMemcpyHostToDevice ); else hipMemcpy( a, dev_a, size * sizeof( *dev_a ), hipMemcpyDeviceToHost ); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); free(a); hipFree(dev_a); hipEventDestroy(start); hipEventDestroy(stop); return elapsedTime; } float cuda_host_alloc_test(int size, bool up) { hipEvent_t start, stop; int *a, *dev_a; float elapsedTime; hipEventCreate(&start); hipEventCreate(&stop); hipHostMalloc((void**)&a, size * sizeof(*a), hipHostMallocDefault); hipMalloc((void**)&dev_a, size * sizeof(*dev_a)); hipEventRecord(start, 0); for(int i = 0; i < ITERATIONS; i++) { if (up) hipMemcpy( dev_a, a, size * sizeof( *dev_a ), hipMemcpyHostToDevice ); else hipMemcpy( a, dev_a, size * sizeof( *dev_a ), hipMemcpyDeviceToHost ); } hipEventRecord(stop, 0); 
hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); hipHostFree(a); hipFree(dev_a); hipEventDestroy(start); hipEventDestroy(stop); return elapsedTime; }
b7e80d92d37a4e4d482e8fb0cdad91d7ca172e47.cu
/** * Yuri Gorokhov * lab 10 - Cuda Host Alloc */ #include <stdio.h> #include <cuda.h> #include <math.h> #include "../include/cuda_util.h" #define SIZE 10*1024*1024 #define ITERATIONS 100 float cuda_malloc_test(int size, bool up); float cuda_host_alloc_test(int size, bool up); int main(void) { float elapsedTime; elapsedTime = cuda_malloc_test(SIZE, true); printf("Time using cudaMalloc (copy up): %f\n", elapsedTime); elapsedTime = cuda_malloc_test(SIZE, false); printf("Time using cudaMalloc (copy down): %f\n", elapsedTime); elapsedTime = cuda_host_alloc_test(SIZE, true); printf("Time using cudaHostAlloc (copy up): %f\n", elapsedTime); elapsedTime = cuda_host_alloc_test(SIZE, false); printf("Time using cudaHostAlloc (copy down): %f\n", elapsedTime); } float cuda_malloc_test(int size, bool up) { cudaEvent_t start, stop; int *a, *dev_a; float elapsedTime; cudaEventCreate(&start); cudaEventCreate(&stop); a = (int*)malloc(size*sizeof(*a)); cudaMalloc((void**)&dev_a, size * sizeof(*dev_a)); cudaEventRecord(start, 0); for(int i = 0; i < ITERATIONS; i++) { if (up) cudaMemcpy( dev_a, a, size * sizeof( *dev_a ), cudaMemcpyHostToDevice ); else cudaMemcpy( a, dev_a, size * sizeof( *dev_a ), cudaMemcpyDeviceToHost ); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); free(a); cudaFree(dev_a); cudaEventDestroy(start); cudaEventDestroy(stop); return elapsedTime; } float cuda_host_alloc_test(int size, bool up) { cudaEvent_t start, stop; int *a, *dev_a; float elapsedTime; cudaEventCreate(&start); cudaEventCreate(&stop); cudaHostAlloc((void**)&a, size * sizeof(*a), cudaHostAllocDefault); cudaMalloc((void**)&dev_a, size * sizeof(*dev_a)); cudaEventRecord(start, 0); for(int i = 0; i < ITERATIONS; i++) { if (up) cudaMemcpy( dev_a, a, size * sizeof( *dev_a ), cudaMemcpyHostToDevice ); else cudaMemcpy( a, dev_a, size * sizeof( *dev_a ), cudaMemcpyDeviceToHost ); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); 
cudaEventElapsedTime(&elapsedTime, start, stop); cudaFreeHost(a); cudaFree(dev_a); cudaEventDestroy(start); cudaEventDestroy(stop); return elapsedTime; }
6043b4c1d57f9557002b162d5b9827b0db146434.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "bboxUtils.h" pluginStatus_t detectionInference( hipStream_t stream, const int N, const int C1, const int C2, const bool shareLocation, const bool varianceEncodedInTarget, const int backgroundLabelId, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const float confidenceThreshold, const float nmsThreshold, const CodeTypeSSD codeType, const DataType DT_BBOX, const void* locData, const void* priorData, const DataType DT_SCORE, const void* confData, void* keepCount, void* topDetections, void* workspace, bool isNormalized, bool confSigmoid) { // Batch size * number bbox per sample * 4 = total number of bounding boxes * 4 const int locCount = N * C1; // Do not clip the bounding box that goes outside the image const bool clipBBox = false; /* * shareLocation * Bounding box are shared among all classes, i.e., a bounding box could be classified as any candidate class. * Otherwise * Bounding box are designed for specific classes, i.e., a bounding box could be classified as one certain class or not (binary classification). */ const int numLocClasses = shareLocation ? 
1 : numClasses; size_t bboxDataSize = detectionForwardBBoxDataSize(N, C1, DataType::kFLOAT); void* bboxDataRaw = workspace; pluginStatus_t status = decodeBBoxes(stream, locCount, codeType, varianceEncodedInTarget, numPredsPerClass, shareLocation, numLocClasses, backgroundLabelId, clipBBox, DataType::kFLOAT, locData, priorData, bboxDataRaw); ASSERT_FAILURE(status == STATUS_SUCCESS); /* * bboxDataRaw format: * [batch size, numPriors (per sample), numLocClasses, 4] */ // float for now void* bboxData; size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, C1, DataType::kFLOAT); void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize); /* * After permutation, bboxData format: * [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4] * This is equivalent to swapping axis */ if (!shareLocation) { status = permuteData(stream, locCount, numLocClasses, numPredsPerClass, 4, DataType::kFLOAT, false, bboxDataRaw, bboxPermute); ASSERT_FAILURE(status == STATUS_SUCCESS); bboxData = bboxPermute; } /* * If shareLocation, numLocClasses = 1 * No need to permute data on linear memory */ else { bboxData = bboxDataRaw; } /* * Conf data format * [batch size, numPriors * param.numClasses, 1, 1] */ const int numScores = N * C2; size_t scoresSize = detectionForwardPreNMSSize(N, C2); void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize); // need a conf_scores /* * After permutation, bboxData format: * [batch_size, numClasses, numPredsPerClass, 1] */ status = permuteData(stream, numScores, numClasses, numPredsPerClass, 1, DataType::kFLOAT, confSigmoid, confData, scores); ASSERT_FAILURE(status == STATUS_SUCCESS); size_t indicesSize = detectionForwardPreNMSSize(N, C2); void* indices = nextWorkspacePtr((int8_t*) scores, scoresSize); size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK); size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK); void* postNMSScores = 
nextWorkspacePtr((int8_t*) indices, indicesSize); void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize); //size_t sortingWorkspaceSize = sortScoresPerClassWorkspaceSize(N, numClasses, numPredsPerClass, FLOAT32); void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize); // Sort the scores so that the following NMS could be applied. status = sortScoresPerClass(stream, N, numClasses, numPredsPerClass, backgroundLabelId, confidenceThreshold, DataType::kFLOAT, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // NMS status = allClassNMS(stream, N, numClasses, numPredsPerClass, topK, nmsThreshold, shareLocation, isNormalized, DataType::kFLOAT, DataType::kFLOAT, bboxData, scores, indices, postNMSScores, postNMSIndices, false); ASSERT_FAILURE(status == STATUS_SUCCESS); // Sort the bounding boxes after NMS using scores status = sortScoresPerImage(stream, N, numClasses * topK, DataType::kFLOAT, postNMSScores, postNMSIndices, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // Gather data from the sorted bounding boxes after NMS status = gatherTopDetections(stream, shareLocation, N, numPredsPerClass, numClasses, topK, keepTopK, DataType::kFLOAT, DataType::kFLOAT, indices, scores, bboxData, keepCount, topDetections); ASSERT_FAILURE(status == STATUS_SUCCESS); return STATUS_SUCCESS; } namespace nvinfer1 { namespace plugin { pluginStatus_t detectionInference( hipStream_t stream, const int N, const int C1, const int C2, const bool shareLocation, const bool varianceEncodedInTarget, const int backgroundLabelId, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const float confidenceThreshold, const float nmsThreshold, const CodeTypeSSD codeType, const DataType DT_BBOX, const void* locData, const void* priorData, const DataType DT_SCORE, const void* confData, void* keepCount, void* topDetections, void* workspace, bool isNormalized, 
bool confSigmoid) { // Batch size * number bbox per sample * 4 = total number of bounding boxes * 4 const int locCount = N * C1; // Do not clip the bounding box that goes outside the image const bool clipBBox = false; /* * shareLocation * Bounding box are shared among all classes, i.e., a bounding box could be classified as any candidate class. * Otherwise * Bounding box are designed for specific classes, i.e., a bounding box could be classified as one certain class or not (binary classification). */ const int numLocClasses = shareLocation ? 1 : numClasses; size_t bboxDataSize = detectionForwardBBoxDataSize(N, C1, DataType::kFLOAT); void* bboxDataRaw = workspace; pluginStatus_t status = decodeBBoxes(stream, locCount, codeType, varianceEncodedInTarget, numPredsPerClass, shareLocation, numLocClasses, backgroundLabelId, clipBBox, DataType::kFLOAT, locData, priorData, bboxDataRaw); ASSERT_FAILURE(status == STATUS_SUCCESS); /* * bboxDataRaw format: * [batch size, numPriors (per sample), numLocClasses, 4] */ // float for now void* bboxData; size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, C1, DataType::kFLOAT); void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize); /* * After permutation, bboxData format: * [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4] * This is equivalent to swapping axis */ if (!shareLocation) { status = permuteData(stream, locCount, numLocClasses, numPredsPerClass, 4, DataType::kFLOAT, false, bboxDataRaw, bboxPermute); ASSERT_FAILURE(status == STATUS_SUCCESS); bboxData = bboxPermute; } /* * If shareLocation, numLocClasses = 1 * No need to permute data on linear memory */ else { bboxData = bboxDataRaw; } /* * Conf data format * [batch size, numPriors * param.numClasses, 1, 1] */ const int numScores = N * C2; size_t scoresSize = detectionForwardPreNMSSize(N, C2); void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize); // need a conf_scores /* * After permutation, 
bboxData format: * [batch_size, numClasses, numPredsPerClass, 1] */ status = permuteData(stream, numScores, numClasses, numPredsPerClass, 1, DataType::kFLOAT, confSigmoid, confData, scores); ASSERT_FAILURE(status == STATUS_SUCCESS); size_t indicesSize = detectionForwardPreNMSSize(N, C2); void* indices = nextWorkspacePtr((int8_t*) scores, scoresSize); size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK); size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK); void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize); void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize); //size_t sortingWorkspaceSize = sortScoresPerClassWorkspaceSize(N, numClasses, numPredsPerClass, FLOAT32); void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize); // Sort the scores so that the following NMS could be applied. status = sortScoresPerClass(stream, N, numClasses, numPredsPerClass, backgroundLabelId, confidenceThreshold, DataType::kFLOAT, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // NMS status = allClassNMS(stream, N, numClasses, numPredsPerClass, topK, nmsThreshold, shareLocation, isNormalized, DataType::kFLOAT, DataType::kFLOAT, bboxData, scores, indices, postNMSScores, postNMSIndices, false); ASSERT_FAILURE(status == STATUS_SUCCESS); // Sort the bounding boxes after NMS using scores status = sortScoresPerImage(stream, N, numClasses * topK, DataType::kFLOAT, postNMSScores, postNMSIndices, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // Gather data from the sorted bounding boxes after NMS status = gatherTopDetections(stream, shareLocation, N, numPredsPerClass, numClasses, topK, keepTopK, DataType::kFLOAT, DataType::kFLOAT, indices, scores, bboxData, keepCount, topDetections); ASSERT_FAILURE(status == STATUS_SUCCESS); return STATUS_SUCCESS; } } // namespace plugin } // namespace nvinfer1
6043b4c1d57f9557002b162d5b9827b0db146434.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "bboxUtils.h" pluginStatus_t detectionInference( cudaStream_t stream, const int N, const int C1, const int C2, const bool shareLocation, const bool varianceEncodedInTarget, const int backgroundLabelId, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const float confidenceThreshold, const float nmsThreshold, const CodeTypeSSD codeType, const DataType DT_BBOX, const void* locData, const void* priorData, const DataType DT_SCORE, const void* confData, void* keepCount, void* topDetections, void* workspace, bool isNormalized, bool confSigmoid) { // Batch size * number bbox per sample * 4 = total number of bounding boxes * 4 const int locCount = N * C1; // Do not clip the bounding box that goes outside the image const bool clipBBox = false; /* * shareLocation * Bounding box are shared among all classes, i.e., a bounding box could be classified as any candidate class. * Otherwise * Bounding box are designed for specific classes, i.e., a bounding box could be classified as one certain class or not (binary classification). */ const int numLocClasses = shareLocation ? 
1 : numClasses; size_t bboxDataSize = detectionForwardBBoxDataSize(N, C1, DataType::kFLOAT); void* bboxDataRaw = workspace; pluginStatus_t status = decodeBBoxes(stream, locCount, codeType, varianceEncodedInTarget, numPredsPerClass, shareLocation, numLocClasses, backgroundLabelId, clipBBox, DataType::kFLOAT, locData, priorData, bboxDataRaw); ASSERT_FAILURE(status == STATUS_SUCCESS); /* * bboxDataRaw format: * [batch size, numPriors (per sample), numLocClasses, 4] */ // float for now void* bboxData; size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, C1, DataType::kFLOAT); void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize); /* * After permutation, bboxData format: * [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4] * This is equivalent to swapping axis */ if (!shareLocation) { status = permuteData(stream, locCount, numLocClasses, numPredsPerClass, 4, DataType::kFLOAT, false, bboxDataRaw, bboxPermute); ASSERT_FAILURE(status == STATUS_SUCCESS); bboxData = bboxPermute; } /* * If shareLocation, numLocClasses = 1 * No need to permute data on linear memory */ else { bboxData = bboxDataRaw; } /* * Conf data format * [batch size, numPriors * param.numClasses, 1, 1] */ const int numScores = N * C2; size_t scoresSize = detectionForwardPreNMSSize(N, C2); void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize); // need a conf_scores /* * After permutation, bboxData format: * [batch_size, numClasses, numPredsPerClass, 1] */ status = permuteData(stream, numScores, numClasses, numPredsPerClass, 1, DataType::kFLOAT, confSigmoid, confData, scores); ASSERT_FAILURE(status == STATUS_SUCCESS); size_t indicesSize = detectionForwardPreNMSSize(N, C2); void* indices = nextWorkspacePtr((int8_t*) scores, scoresSize); size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK); size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK); void* postNMSScores = 
nextWorkspacePtr((int8_t*) indices, indicesSize); void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize); //size_t sortingWorkspaceSize = sortScoresPerClassWorkspaceSize(N, numClasses, numPredsPerClass, FLOAT32); void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize); // Sort the scores so that the following NMS could be applied. status = sortScoresPerClass(stream, N, numClasses, numPredsPerClass, backgroundLabelId, confidenceThreshold, DataType::kFLOAT, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // NMS status = allClassNMS(stream, N, numClasses, numPredsPerClass, topK, nmsThreshold, shareLocation, isNormalized, DataType::kFLOAT, DataType::kFLOAT, bboxData, scores, indices, postNMSScores, postNMSIndices, false); ASSERT_FAILURE(status == STATUS_SUCCESS); // Sort the bounding boxes after NMS using scores status = sortScoresPerImage(stream, N, numClasses * topK, DataType::kFLOAT, postNMSScores, postNMSIndices, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // Gather data from the sorted bounding boxes after NMS status = gatherTopDetections(stream, shareLocation, N, numPredsPerClass, numClasses, topK, keepTopK, DataType::kFLOAT, DataType::kFLOAT, indices, scores, bboxData, keepCount, topDetections); ASSERT_FAILURE(status == STATUS_SUCCESS); return STATUS_SUCCESS; } namespace nvinfer1 { namespace plugin { pluginStatus_t detectionInference( cudaStream_t stream, const int N, const int C1, const int C2, const bool shareLocation, const bool varianceEncodedInTarget, const int backgroundLabelId, const int numPredsPerClass, const int numClasses, const int topK, const int keepTopK, const float confidenceThreshold, const float nmsThreshold, const CodeTypeSSD codeType, const DataType DT_BBOX, const void* locData, const void* priorData, const DataType DT_SCORE, const void* confData, void* keepCount, void* topDetections, void* workspace, bool isNormalized, 
bool confSigmoid) { // Batch size * number bbox per sample * 4 = total number of bounding boxes * 4 const int locCount = N * C1; // Do not clip the bounding box that goes outside the image const bool clipBBox = false; /* * shareLocation * Bounding box are shared among all classes, i.e., a bounding box could be classified as any candidate class. * Otherwise * Bounding box are designed for specific classes, i.e., a bounding box could be classified as one certain class or not (binary classification). */ const int numLocClasses = shareLocation ? 1 : numClasses; size_t bboxDataSize = detectionForwardBBoxDataSize(N, C1, DataType::kFLOAT); void* bboxDataRaw = workspace; pluginStatus_t status = decodeBBoxes(stream, locCount, codeType, varianceEncodedInTarget, numPredsPerClass, shareLocation, numLocClasses, backgroundLabelId, clipBBox, DataType::kFLOAT, locData, priorData, bboxDataRaw); ASSERT_FAILURE(status == STATUS_SUCCESS); /* * bboxDataRaw format: * [batch size, numPriors (per sample), numLocClasses, 4] */ // float for now void* bboxData; size_t bboxPermuteSize = detectionForwardBBoxPermuteSize(shareLocation, N, C1, DataType::kFLOAT); void* bboxPermute = nextWorkspacePtr((int8_t*) bboxDataRaw, bboxDataSize); /* * After permutation, bboxData format: * [batch_size, numLocClasses, numPriors (per sample) (numPredsPerClass), 4] * This is equivalent to swapping axis */ if (!shareLocation) { status = permuteData(stream, locCount, numLocClasses, numPredsPerClass, 4, DataType::kFLOAT, false, bboxDataRaw, bboxPermute); ASSERT_FAILURE(status == STATUS_SUCCESS); bboxData = bboxPermute; } /* * If shareLocation, numLocClasses = 1 * No need to permute data on linear memory */ else { bboxData = bboxDataRaw; } /* * Conf data format * [batch size, numPriors * param.numClasses, 1, 1] */ const int numScores = N * C2; size_t scoresSize = detectionForwardPreNMSSize(N, C2); void* scores = nextWorkspacePtr((int8_t*) bboxPermute, bboxPermuteSize); // need a conf_scores /* * After permutation, 
bboxData format: * [batch_size, numClasses, numPredsPerClass, 1] */ status = permuteData(stream, numScores, numClasses, numPredsPerClass, 1, DataType::kFLOAT, confSigmoid, confData, scores); ASSERT_FAILURE(status == STATUS_SUCCESS); size_t indicesSize = detectionForwardPreNMSSize(N, C2); void* indices = nextWorkspacePtr((int8_t*) scores, scoresSize); size_t postNMSScoresSize = detectionForwardPostNMSSize(N, numClasses, topK); size_t postNMSIndicesSize = detectionForwardPostNMSSize(N, numClasses, topK); void* postNMSScores = nextWorkspacePtr((int8_t*) indices, indicesSize); void* postNMSIndices = nextWorkspacePtr((int8_t*) postNMSScores, postNMSScoresSize); //size_t sortingWorkspaceSize = sortScoresPerClassWorkspaceSize(N, numClasses, numPredsPerClass, FLOAT32); void* sortingWorkspace = nextWorkspacePtr((int8_t*) postNMSIndices, postNMSIndicesSize); // Sort the scores so that the following NMS could be applied. status = sortScoresPerClass(stream, N, numClasses, numPredsPerClass, backgroundLabelId, confidenceThreshold, DataType::kFLOAT, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // NMS status = allClassNMS(stream, N, numClasses, numPredsPerClass, topK, nmsThreshold, shareLocation, isNormalized, DataType::kFLOAT, DataType::kFLOAT, bboxData, scores, indices, postNMSScores, postNMSIndices, false); ASSERT_FAILURE(status == STATUS_SUCCESS); // Sort the bounding boxes after NMS using scores status = sortScoresPerImage(stream, N, numClasses * topK, DataType::kFLOAT, postNMSScores, postNMSIndices, scores, indices, sortingWorkspace); ASSERT_FAILURE(status == STATUS_SUCCESS); // Gather data from the sorted bounding boxes after NMS status = gatherTopDetections(stream, shareLocation, N, numPredsPerClass, numClasses, topK, keepTopK, DataType::kFLOAT, DataType::kFLOAT, indices, scores, bboxData, keepCount, topDetections); ASSERT_FAILURE(status == STATUS_SUCCESS); return STATUS_SUCCESS; } } // namespace plugin } // namespace nvinfer1
4ea919bac0514a6e10b9be2bfa17c3233fee6bfc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by Niu Chuang on 17-7-31. // #include <algorithm> #include <vector> #include "caffe/layers/relu_mask_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLUMaskForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } template <typename Dtype> __global__ void ReLUMaskForward_with_mask(const int n, const Dtype* in, Dtype* out, Dtype* out_mask, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; out_mask[index] = in[index] > 0 ? 1 : 0; } } template <typename Dtype> void ReLUMaskLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const bool use_top_mask = top.size() > 1; Dtype* top_mask = NULL; Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); hipLaunchKernelGGL(( ReLUMaskForward_with_mask<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data, top_mask, negative_slope); CUDA_POST_KERNEL_CHECK; } else { hipLaunchKernelGGL(( ReLUMaskForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; } // NOLINT_NEXT_LINE(whitespace/operators) // ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( // count, bottom_data, top_data, negative_slope); // CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " 
threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> void ReLUMaskLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ReLUMaskLayer); } // namespace caffe
4ea919bac0514a6e10b9be2bfa17c3233fee6bfc.cu
// // Created by Niu Chuang on 17-7-31. // #include <algorithm> #include <vector> #include "caffe/layers/relu_mask_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLUMaskForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } template <typename Dtype> __global__ void ReLUMaskForward_with_mask(const int n, const Dtype* in, Dtype* out, Dtype* out_mask, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; out_mask[index] = in[index] > 0 ? 1 : 0; } } template <typename Dtype> void ReLUMaskLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); const bool use_top_mask = top.size() > 1; Dtype* top_mask = NULL; Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); ReLUMaskForward_with_mask<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data, top_mask, negative_slope); CUDA_POST_KERNEL_CHECK; } else { ReLUMaskForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; } // NOLINT_NEXT_LINE(whitespace/operators) // ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( // count, bottom_data, top_data, negative_slope); // CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* 
out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> void ReLUMaskLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ReLUMaskLayer); } // namespace caffe
64aaae68d979c202b267db4b78eca39acccba7bc.hip
// !!! This is a file automatically generated by hipify!!! #include <Windows.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <GL\glfw.h> #include <SDL.h> #include <iostream> #include <string> #include <sstream> #include "cuda_renderer.cuh" #include "Vector3D.cuh" #include <vector> #include "sdl.cuh" #include "Color.cuh" #include "Camera.cuh" #include "Matrix_hip.cuh" #include "IGeometry.cuh" #include "IShader.cuh" #include "Node.cuh" #include "Lambert.cuh" #include "Plane.cuh" #include "Sphere.cuh" #include "EventHandler.h" #include "Menu.h" #include "Settings.cuh" using namespace std; extern "C" void cudaRenderer(Color* dev_vfb); extern "C" void freeDeviceMemory(); extern "C" void initScene(); extern "C" void cameraBeginFrame(); unsigned frameCount; unsigned lastFrameEnd; unsigned lastTitleUpdateTime; unsigned lastTitleUpdateFrameCount; const char* const appName = "CUDA Traycer"; // virtual framebuffer Color vfb[VFB_MAX_SIZE][VFB_MAX_SIZE]; // virtual framebuffer used for GPU operations Color vfb_linear[VFB_MAX_SIZE * VFB_MAX_SIZE]; /** * @brief - Function that prints CUDA specs * of the GPU device/s on the console */ void printGPUSpecs() { hipDeviceProp_t prop; int count; hipGetDeviceCount(&count); for (int i = 0; i < count; ++i) { hipGetDeviceProperties( &prop, i ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) { printf( "Enabled\n" ); } else { printf( "Disabled\n"); } printf( "Kernel execution timeout : " ); if (prop.kernelExecTimeoutEnabled) { printf( "Enabled\n" ); } else { printf( "Disabled\n" ); } printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", 
prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } } /** * @brief - Wrapper function that creates timer and captures the start and stop time * @param start - output - captures the start time * @param stop - output - captires the stop time */ void cudaStartTimer(hipEvent_t& start, hipEvent_t& stop) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); } /** * @brief - Wrapper function that takes the previously captured start and stop time * from cudaStartTimer() function, calculates the elapsed time, * prints it on the console and shows it on the window frame * @param start - the start time that is previously captured by cudaStartTimer() * @param stop - the stop time that is previously captured by cudaStartTimer() * @reference - cudaStartTimer(hipEvent_t& start, hipEvent_t& stop) */ void cudaStopTimer(hipEvent_t& start, hipEvent_t& stop) { hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf( "Time to render: %3.1f ms\n\n", elapsedTime); char info[128]; sprintf(info, "CUDA Traycer || Time to render: %3.1f ms", elapsedTime); SDL_WM_SetCaption(info, NULL); hipEventDestroy(start); hipEventDestroy(stop); } void displayFrameCounter() { ++frameCount; const unsigned now = SDL_GetTicks(); const unsigned frameTime = now - lastFrameEnd; const unsigned 
titleUpdateTimeDelta = now - lastTitleUpdateTime; if (titleUpdateTimeDelta > 1000) { const unsigned framesDelta = frameCount - lastTitleUpdateFrameCount; const unsigned meanFrameTime = titleUpdateTimeDelta / framesDelta; const unsigned fps = framesDelta * 1000 / titleUpdateTimeDelta; std::ostringstream title; title << appName << " :\t\t\t mean frame time: " << meanFrameTime << " ms || fps: " << fps; title.flush(); SDL_WM_SetCaption(title.str().c_str(), NULL); lastTitleUpdateTime = now; lastTitleUpdateFrameCount = frameCount; } lastFrameEnd = SDL_GetTicks(); } /** * @brief - function that converts the linear array vfb_linear * into the 2D array vfb * * This is needed because we pass linear array to the GPU * to process our pixel data and then we convert it to * 2D array in order to display our pixel data with SDL */ void convertDeviceToHostBuffer() { //#pragma omp parallel for schedule(dynamic, 1) for (int i = 0; i < GlobalSettings::RES_Y; ++i) { for (int j = 0; j < GlobalSettings::RES_X; ++j) { vfb[i][j] = vfb_linear[i * GlobalSettings::RES_X + j]; } } } int main(int argc, char** argv) { Menu mainMenu(appName); mainMenu.Destroy(); initColorCache(); if (!initGraphics(GlobalSettings::RES_X, GlobalSettings::RES_Y)) { return -1; } /*SDL_Surface* icon = SDL_LoadBMP("../floor.bmp"); SDL_WM_SetIcon(icon, NULL);*/ printGPUSpecs(); EventHandler eventController; hipDeviceSetLimit(hipLimitStackSize, STACK_SIZE); // allocate memory for vfb on the GPU Color* dev_vfb; hipMalloc((void**)&dev_vfb, sizeof(Color) * GlobalSettings::RES_X * GlobalSettings::RES_Y); // memcpy HostToDevice hipMemcpy(dev_vfb, vfb_linear, sizeof(Color) * GlobalSettings::RES_X * GlobalSettings::RES_Y, hipMemcpyHostToDevice); // InitializeScene initScene(); SDL_WarpMouse(GlobalSettings::RES_X / 2, GlobalSettings::RES_Y / 2); if (GlobalSettings::realTime) { while (eventController.isRealTimeRendering) { cameraBeginFrame(); displayFrameCounter(); cudaRenderer(dev_vfb); hipMemcpy(vfb_linear, dev_vfb, 
sizeof(Color) * GlobalSettings::RES_X * GlobalSettings::RES_Y, hipMemcpyDeviceToHost); convertDeviceToHostBuffer(); eventController.handleEvents(); displayVFB(vfb); } } else { // capture the start time hipEvent_t start, stop; cudaStartTimer(start, stop); // call kernels // - RenderScene cudaRenderer(dev_vfb); // memcpy DeviceToHost hipMemcpy(vfb_linear, dev_vfb, sizeof(Color) * GlobalSettings::RES_X * GlobalSettings::RES_Y, hipMemcpyDeviceToHost); // get stop time, and display the timing results cudaStopTimer(start, stop); convertDeviceToHostBuffer(); displayVFB(vfb); eventController.handleUserInput(); } // free memory freeDeviceMemory(); hipFree(dev_vfb); closeGraphics(); return EXIT_SUCCESS; }
64aaae68d979c202b267db4b78eca39acccba7bc.cu
#include <Windows.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <GL\glfw.h> #include <SDL.h> #include <iostream> #include <string> #include <sstream> #include "cuda_renderer.cuh" #include "Vector3D.cuh" #include <vector> #include "sdl.cuh" #include "Color.cuh" #include "Camera.cuh" #include "Matrix.cuh" #include "IGeometry.cuh" #include "IShader.cuh" #include "Node.cuh" #include "Lambert.cuh" #include "Plane.cuh" #include "Sphere.cuh" #include "EventHandler.h" #include "Menu.h" #include "Settings.cuh" using namespace std; extern "C" void cudaRenderer(Color* dev_vfb); extern "C" void freeDeviceMemory(); extern "C" void initScene(); extern "C" void cameraBeginFrame(); unsigned frameCount; unsigned lastFrameEnd; unsigned lastTitleUpdateTime; unsigned lastTitleUpdateFrameCount; const char* const appName = "CUDA Traycer"; // virtual framebuffer Color vfb[VFB_MAX_SIZE][VFB_MAX_SIZE]; // virtual framebuffer used for GPU operations Color vfb_linear[VFB_MAX_SIZE * VFB_MAX_SIZE]; /** * @brief - Function that prints CUDA specs * of the GPU device/s on the console */ void printGPUSpecs() { cudaDeviceProp prop; int count; cudaGetDeviceCount(&count); for (int i = 0; i < count; ++i) { cudaGetDeviceProperties( &prop, i ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) { printf( "Enabled\n" ); } else { printf( "Disabled\n"); } printf( "Kernel execution timeout : " ); if (prop.kernelExecTimeoutEnabled) { printf( "Enabled\n" ); } else { printf( "Disabled\n" ); } printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", 
prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } } /** * @brief - Wrapper function that creates timer and captures the start and stop time * @param start - output - captures the start time * @param stop - output - captires the stop time */ void cudaStartTimer(cudaEvent_t& start, cudaEvent_t& stop) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); } /** * @brief - Wrapper function that takes the previously captured start and stop time * from cudaStartTimer() function, calculates the elapsed time, * prints it on the console and shows it on the window frame * @param start - the start time that is previously captured by cudaStartTimer() * @param stop - the stop time that is previously captured by cudaStartTimer() * @reference - cudaStartTimer(cudaEvent_t& start, cudaEvent_t& stop) */ void cudaStopTimer(cudaEvent_t& start, cudaEvent_t& stop) { cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf( "Time to render: %3.1f ms\n\n", elapsedTime); char info[128]; sprintf(info, "CUDA Traycer || Time to render: %3.1f ms", elapsedTime); SDL_WM_SetCaption(info, NULL); cudaEventDestroy(start); cudaEventDestroy(stop); } void displayFrameCounter() { ++frameCount; const unsigned now = SDL_GetTicks(); const unsigned frameTime = now - lastFrameEnd; const unsigned titleUpdateTimeDelta = now - lastTitleUpdateTime; 
if (titleUpdateTimeDelta > 1000) { const unsigned framesDelta = frameCount - lastTitleUpdateFrameCount; const unsigned meanFrameTime = titleUpdateTimeDelta / framesDelta; const unsigned fps = framesDelta * 1000 / titleUpdateTimeDelta; std::ostringstream title; title << appName << " :\t\t\t mean frame time: " << meanFrameTime << " ms || fps: " << fps; title.flush(); SDL_WM_SetCaption(title.str().c_str(), NULL); lastTitleUpdateTime = now; lastTitleUpdateFrameCount = frameCount; } lastFrameEnd = SDL_GetTicks(); } /** * @brief - function that converts the linear array vfb_linear * into the 2D array vfb * * This is needed because we pass linear array to the GPU * to process our pixel data and then we convert it to * 2D array in order to display our pixel data with SDL */ void convertDeviceToHostBuffer() { //#pragma omp parallel for schedule(dynamic, 1) for (int i = 0; i < GlobalSettings::RES_Y; ++i) { for (int j = 0; j < GlobalSettings::RES_X; ++j) { vfb[i][j] = vfb_linear[i * GlobalSettings::RES_X + j]; } } } int main(int argc, char** argv) { Menu mainMenu(appName); mainMenu.Destroy(); initColorCache(); if (!initGraphics(GlobalSettings::RES_X, GlobalSettings::RES_Y)) { return -1; } /*SDL_Surface* icon = SDL_LoadBMP("../floor.bmp"); SDL_WM_SetIcon(icon, NULL);*/ printGPUSpecs(); EventHandler eventController; cudaDeviceSetLimit(cudaLimitStackSize, STACK_SIZE); // allocate memory for vfb on the GPU Color* dev_vfb; cudaMalloc((void**)&dev_vfb, sizeof(Color) * GlobalSettings::RES_X * GlobalSettings::RES_Y); // memcpy HostToDevice cudaMemcpy(dev_vfb, vfb_linear, sizeof(Color) * GlobalSettings::RES_X * GlobalSettings::RES_Y, cudaMemcpyHostToDevice); // InitializeScene initScene(); SDL_WarpMouse(GlobalSettings::RES_X / 2, GlobalSettings::RES_Y / 2); if (GlobalSettings::realTime) { while (eventController.isRealTimeRendering) { cameraBeginFrame(); displayFrameCounter(); cudaRenderer(dev_vfb); cudaMemcpy(vfb_linear, dev_vfb, sizeof(Color) * GlobalSettings::RES_X * 
GlobalSettings::RES_Y, cudaMemcpyDeviceToHost); convertDeviceToHostBuffer(); eventController.handleEvents(); displayVFB(vfb); } } else { // capture the start time cudaEvent_t start, stop; cudaStartTimer(start, stop); // call kernels // - RenderScene cudaRenderer(dev_vfb); // memcpy DeviceToHost cudaMemcpy(vfb_linear, dev_vfb, sizeof(Color) * GlobalSettings::RES_X * GlobalSettings::RES_Y, cudaMemcpyDeviceToHost); // get stop time, and display the timing results cudaStopTimer(start, stop); convertDeviceToHostBuffer(); displayVFB(vfb); eventController.handleUserInput(); } // free memory freeDeviceMemory(); cudaFree(dev_vfb); closeGraphics(); return EXIT_SUCCESS; }
d333e886f732fd93cda87a89aa80a56b130e5da2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/elementwise_div_op.h" #include <algorithm> #include <functional> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/elementwise_ops_utils.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename TGrad, typename TIn, int D> __global__ void ComputeDivAGradientCUDAKernel( const int outer_size, const int inner_size, const SimpleArray<int, D> C_dims, const SimpleArray<int, D> C_strides, const SimpleArray<int, D> B_strides, const SimpleArray<int, D> A_dims, const TGrad* dC, const TIn* B, TGrad* dA) { __shared__ typename BlockReduce<TGrad>::TempStorage temp_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { TGrad sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int A_index = i * inner_size + j; int C_index = 0; int A_index_val = A_index; #pragma unroll for (int d = D - 1; d >= 0; --d) { C_index += (A_index_val % A_dims.data[d]) * C_strides.data[d]; A_index_val /= A_dims.data[d]; } int B_index = 0; int C_index_val = C_index; #pragma unroll for (int d = D - 1; d >= 0; --d) { B_index += B_strides.data[d] == 0 ? 
0 : (C_index_val % C_dims.data[d]) * B_strides.data[d]; C_index_val /= C_dims.data[d]; } #if __CUDA_ARCH__ >= 350 sum += __ldg(dC + C_index) / __ldg(B + B_index); #else sum += dC[C_index] / B[B_index]; #endif } sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, hipcub::Sum()); if (threadIdx.x == 0) { dA[i] = sum; } __syncthreads(); } } template <typename TGrad, typename TIn, typename TOut> __global__ void ComputeSimpleDivBGradientCUDAKernel( const int size, const TGrad* dC, const TIn* B, const TOut* C, TGrad* dB) { CUDA_1D_KERNEL_LOOP(i, size) { #if __CUDA_ARCH__ >= 350 dB[i] = -__ldg(dC + i) * __ldg(C + i) / __ldg(B + i); #else dB[i] = -dC[i] * C[i] / B[i]; #endif } } template <typename TGrad, typename TIn, typename TOut, int D> __global__ void ComputeDivBGradientCUDAKernel( const int outer_size, const int inner_size, const SimpleArray<int, D> C_strides, const SimpleArray<int, D> B_dims, const TGrad* dC, const TIn* B, const TOut* C, TGrad* dB) { __shared__ typename BlockReduce<TGrad>::TempStorage temp_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { TGrad sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int C_index = 0; int B_index = i * inner_size + j; #pragma unroll for (int d = D - 1; d >= 0; --d) { C_index += (B_index % B_dims.data[d]) * C_strides.data[d]; B_index /= B_dims.data[d]; } #if __CUDA_ARCH__ >= 350 sum += -__ldg(dC + C_index) * __ldg(C + C_index) / __ldg(B + i); #else sum += -dC[C_index] * C[C_index] / B[i]; #endif } sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, hipcub::Sum()); if (threadIdx.x == 0) { dB[i] = sum; } __syncthreads(); } } template <typename TGrad, typename TIn, int D> void ComputeDivAGradientCUDAImpl( const int outer_size, const int inner_size, const int* C_dims, const int* B_dims, const int* A_axes, const TGrad* dC, const TIn* B, TGrad* dA, CUDAContext* context) { SimpleArray<int, D> C_dims_arr; SimpleArray<int, D> C_strides_arr; SimpleArray<int, D> B_strides_arr; SimpleArray<int, D> 
A_dims_arr; std::copy_n(C_dims, D, C_dims_arr.data); math::utils::ComputeTransposedStrides(D, C_dims, A_axes, C_strides_arr.data); int cur_stride = 1; for (int i = D - 1; i >= 0; --i) { B_strides_arr.data[i] = B_dims[i] == 1 ? 0 : cur_stride; cur_stride *= B_dims[i]; } for (int i = 0; i < D; ++i) { A_dims_arr.data[i] = C_dims[A_axes[i]]; } hipLaunchKernelGGL(( ComputeDivAGradientCUDAKernel<TGrad, TIn, D>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), outer_size, inner_size, C_dims_arr, C_strides_arr, B_strides_arr, A_dims_arr, dC, B, dA); } template <typename TGrad, typename TIn, typename TOut, int D> void ComputeDivBGradientCUDAImpl( const int outer_size, const int inner_size, const int* C_dims, const int* B_axes, const TGrad* dC, const TIn* B, const TOut* C, TGrad* dB, CUDAContext* context) { SimpleArray<int, D> C_strides_arr; SimpleArray<int, D> B_dims_arr; math::utils::ComputeTransposedStrides(D, C_dims, B_axes, C_strides_arr.data); for (int i = 0; i < D; ++i) { B_dims_arr.data[i] = C_dims[B_axes[i]]; } hipLaunchKernelGGL(( ComputeDivBGradientCUDAKernel<TGrad, TIn, TOut, D>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), outer_size, inner_size, C_strides_arr, B_dims_arr, dC, B, C, dB); } template <typename TGrad, typename TIn> void ComputeDivAGradientCUDA( const std::vector<int>& C_dims, const std::vector<int>& B_dims, const std::vector<int>& A_axes, const TGrad* dC, const TIn* B, TGrad* dA, CUDAContext* context) { CAFFE_ENFORCE_EQ(C_dims.size(), B_dims.size()); const int ndim = C_dims.size(); std::vector<int> A_transpose_axes(ndim); math::utils::ComputeTransposeAxesForReduceOp( ndim, A_axes.size(), A_axes.data(), A_transpose_axes.data()); const int pivot = ndim - A_axes.size(); int outer_size = 1; for (int i = 0; i < pivot; ++i) { outer_size *= C_dims[A_transpose_axes[i]]; } int inner_size = 1; for (int i = pivot; i < ndim; ++i) { 
inner_size *= C_dims[A_transpose_axes[i]]; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2( ndim, ComputeDivAGradientCUDAImpl, TGrad, TIn, outer_size, inner_size, C_dims.data(), B_dims.data(), A_transpose_axes.data(), dC, B, dA, context); } template <typename TGrad, typename TIn, typename TOut> void ComputeDivBGradientCUDA( const std::vector<int>& C_dims, const std::vector<int>& B_axes, const TGrad* dC, const TIn* B, const TOut* C, TGrad* dB, CUDAContext* context) { const int ndim = C_dims.size(); std::vector<int> B_transpose_axes(ndim); math::utils::ComputeTransposeAxesForReduceOp( ndim, B_axes.size(), B_axes.data(), B_transpose_axes.data()); const int pivot = ndim - B_axes.size(); int outer_size = 1; for (int i = 0; i < pivot; ++i) { outer_size *= C_dims[B_transpose_axes[i]]; } int inner_size = 1; for (int i = pivot; i < ndim; ++i) { inner_size *= C_dims[B_transpose_axes[i]]; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3( ndim, ComputeDivBGradientCUDAImpl, TGrad, TIn, TOut, outer_size, inner_size, C_dims.data(), B_transpose_axes.data(), dC, B, C, dB, context); } } // namespace template <> template <typename TGrad, typename TIn, typename TOut> bool DivFunctor<CUDAContext>::Backward( const std::vector<int>& A_dims, const std::vector<int>& B_dims, const TGrad* dC, const TIn* /* A */, const TIn* B, const TOut* C, TGrad* dA, TGrad* dB, CUDAContext* context) const { if (A_dims == B_dims) { const int size = std::accumulate( A_dims.cbegin(), A_dims.cend(), 1, std::multiplies<int>()); math::Div(size, dC, B, dA, context); hipLaunchKernelGGL(( ComputeSimpleDivBGradientCUDAKernel<TGrad, TIn, TOut>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, dC, B, C, dB); return true; } const int ndim = ::max(A_dims.size(), B_dims.size()); std::vector<int> A_broadcast_dims(ndim); std::vector<int> B_broadcast_dims(ndim); std::vector<int> C_broadcast_dims(ndim); math::utils::ComputeBroadcastBinaryOpDims( A_dims.size(), A_dims.data(), B_dims.size(), 
B_dims.data(), A_broadcast_dims.data(), B_broadcast_dims.data(), C_broadcast_dims.data()); std::vector<int> A_axes; std::vector<int> B_axes; elementwise_ops_utils::ComputeBinaryBroadcastBackwardAxes( A_dims, B_dims, &A_axes, &B_axes); ComputeDivAGradientCUDA<TGrad, TIn>( C_broadcast_dims, B_broadcast_dims, A_axes, dC, B, dA, context); ComputeDivBGradientCUDA<TGrad, TIn, TOut>( C_broadcast_dims, B_axes, dC, B, C, dB, context); return true; } REGISTER_CUDA_OPERATOR( Div, BinaryElementwiseOp<NumericTypes, CUDAContext, DivFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( DivGradient, BinaryElementwiseGradientOp< NumericTypes, CUDAContext, DivFunctor<CUDAContext>>); } // namespace caffe2
d333e886f732fd93cda87a89aa80a56b130e5da2.cu
#include "caffe2/operators/elementwise_div_op.h" #include <algorithm> #include <functional> #include <cub/block/block_reduce.cuh> #include <cub/cub.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/elementwise_ops_utils.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename TGrad, typename TIn, int D> __global__ void ComputeDivAGradientCUDAKernel( const int outer_size, const int inner_size, const SimpleArray<int, D> C_dims, const SimpleArray<int, D> C_strides, const SimpleArray<int, D> B_strides, const SimpleArray<int, D> A_dims, const TGrad* dC, const TIn* B, TGrad* dA) { __shared__ typename BlockReduce<TGrad>::TempStorage temp_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { TGrad sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int A_index = i * inner_size + j; int C_index = 0; int A_index_val = A_index; #pragma unroll for (int d = D - 1; d >= 0; --d) { C_index += (A_index_val % A_dims.data[d]) * C_strides.data[d]; A_index_val /= A_dims.data[d]; } int B_index = 0; int C_index_val = C_index; #pragma unroll for (int d = D - 1; d >= 0; --d) { B_index += B_strides.data[d] == 0 ? 
0 : (C_index_val % C_dims.data[d]) * B_strides.data[d]; C_index_val /= C_dims.data[d]; } #if __CUDA_ARCH__ >= 350 sum += __ldg(dC + C_index) / __ldg(B + B_index); #else sum += dC[C_index] / B[B_index]; #endif } sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, cub::Sum()); if (threadIdx.x == 0) { dA[i] = sum; } __syncthreads(); } } template <typename TGrad, typename TIn, typename TOut> __global__ void ComputeSimpleDivBGradientCUDAKernel( const int size, const TGrad* dC, const TIn* B, const TOut* C, TGrad* dB) { CUDA_1D_KERNEL_LOOP(i, size) { #if __CUDA_ARCH__ >= 350 dB[i] = -__ldg(dC + i) * __ldg(C + i) / __ldg(B + i); #else dB[i] = -dC[i] * C[i] / B[i]; #endif } } template <typename TGrad, typename TIn, typename TOut, int D> __global__ void ComputeDivBGradientCUDAKernel( const int outer_size, const int inner_size, const SimpleArray<int, D> C_strides, const SimpleArray<int, D> B_dims, const TGrad* dC, const TIn* B, const TOut* C, TGrad* dB) { __shared__ typename BlockReduce<TGrad>::TempStorage temp_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { TGrad sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { int C_index = 0; int B_index = i * inner_size + j; #pragma unroll for (int d = D - 1; d >= 0; --d) { C_index += (B_index % B_dims.data[d]) * C_strides.data[d]; B_index /= B_dims.data[d]; } #if __CUDA_ARCH__ >= 350 sum += -__ldg(dC + C_index) * __ldg(C + C_index) / __ldg(B + i); #else sum += -dC[C_index] * C[C_index] / B[i]; #endif } sum = BlockReduce<TGrad>(temp_storage).Reduce(sum, cub::Sum()); if (threadIdx.x == 0) { dB[i] = sum; } __syncthreads(); } } template <typename TGrad, typename TIn, int D> void ComputeDivAGradientCUDAImpl( const int outer_size, const int inner_size, const int* C_dims, const int* B_dims, const int* A_axes, const TGrad* dC, const TIn* B, TGrad* dA, CUDAContext* context) { SimpleArray<int, D> C_dims_arr; SimpleArray<int, D> C_strides_arr; SimpleArray<int, D> B_strides_arr; SimpleArray<int, D> A_dims_arr; 
std::copy_n(C_dims, D, C_dims_arr.data); math::utils::ComputeTransposedStrides(D, C_dims, A_axes, C_strides_arr.data); int cur_stride = 1; for (int i = D - 1; i >= 0; --i) { B_strides_arr.data[i] = B_dims[i] == 1 ? 0 : cur_stride; cur_stride *= B_dims[i]; } for (int i = 0; i < D; ++i) { A_dims_arr.data[i] = C_dims[A_axes[i]]; } ComputeDivAGradientCUDAKernel<TGrad, TIn, D> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( outer_size, inner_size, C_dims_arr, C_strides_arr, B_strides_arr, A_dims_arr, dC, B, dA); } template <typename TGrad, typename TIn, typename TOut, int D> void ComputeDivBGradientCUDAImpl( const int outer_size, const int inner_size, const int* C_dims, const int* B_axes, const TGrad* dC, const TIn* B, const TOut* C, TGrad* dB, CUDAContext* context) { SimpleArray<int, D> C_strides_arr; SimpleArray<int, D> B_dims_arr; math::utils::ComputeTransposedStrides(D, C_dims, B_axes, C_strides_arr.data); for (int i = 0; i < D; ++i) { B_dims_arr.data[i] = C_dims[B_axes[i]]; } ComputeDivBGradientCUDAKernel<TGrad, TIn, TOut, D> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( outer_size, inner_size, C_strides_arr, B_dims_arr, dC, B, C, dB); } template <typename TGrad, typename TIn> void ComputeDivAGradientCUDA( const std::vector<int>& C_dims, const std::vector<int>& B_dims, const std::vector<int>& A_axes, const TGrad* dC, const TIn* B, TGrad* dA, CUDAContext* context) { CAFFE_ENFORCE_EQ(C_dims.size(), B_dims.size()); const int ndim = C_dims.size(); std::vector<int> A_transpose_axes(ndim); math::utils::ComputeTransposeAxesForReduceOp( ndim, A_axes.size(), A_axes.data(), A_transpose_axes.data()); const int pivot = ndim - A_axes.size(); int outer_size = 1; for (int i = 0; i < pivot; ++i) { outer_size *= C_dims[A_transpose_axes[i]]; } int inner_size = 1; for (int i = pivot; i < ndim; ++i) { inner_size *= C_dims[A_transpose_axes[i]]; } 
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2( ndim, ComputeDivAGradientCUDAImpl, TGrad, TIn, outer_size, inner_size, C_dims.data(), B_dims.data(), A_transpose_axes.data(), dC, B, dA, context); } template <typename TGrad, typename TIn, typename TOut> void ComputeDivBGradientCUDA( const std::vector<int>& C_dims, const std::vector<int>& B_axes, const TGrad* dC, const TIn* B, const TOut* C, TGrad* dB, CUDAContext* context) { const int ndim = C_dims.size(); std::vector<int> B_transpose_axes(ndim); math::utils::ComputeTransposeAxesForReduceOp( ndim, B_axes.size(), B_axes.data(), B_transpose_axes.data()); const int pivot = ndim - B_axes.size(); int outer_size = 1; for (int i = 0; i < pivot; ++i) { outer_size *= C_dims[B_transpose_axes[i]]; } int inner_size = 1; for (int i = pivot; i < ndim; ++i) { inner_size *= C_dims[B_transpose_axes[i]]; } DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3( ndim, ComputeDivBGradientCUDAImpl, TGrad, TIn, TOut, outer_size, inner_size, C_dims.data(), B_transpose_axes.data(), dC, B, C, dB, context); } } // namespace template <> template <typename TGrad, typename TIn, typename TOut> bool DivFunctor<CUDAContext>::Backward( const std::vector<int>& A_dims, const std::vector<int>& B_dims, const TGrad* dC, const TIn* /* A */, const TIn* B, const TOut* C, TGrad* dA, TGrad* dB, CUDAContext* context) const { if (A_dims == B_dims) { const int size = std::accumulate( A_dims.cbegin(), A_dims.cend(), 1, std::multiplies<int>()); math::Div(size, dC, B, dA, context); ComputeSimpleDivBGradientCUDAKernel<TGrad, TIn, TOut> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, dC, B, C, dB); return true; } const int ndim = std::max(A_dims.size(), B_dims.size()); std::vector<int> A_broadcast_dims(ndim); std::vector<int> B_broadcast_dims(ndim); std::vector<int> C_broadcast_dims(ndim); math::utils::ComputeBroadcastBinaryOpDims( A_dims.size(), A_dims.data(), B_dims.size(), B_dims.data(), A_broadcast_dims.data(), B_broadcast_dims.data(), 
C_broadcast_dims.data()); std::vector<int> A_axes; std::vector<int> B_axes; elementwise_ops_utils::ComputeBinaryBroadcastBackwardAxes( A_dims, B_dims, &A_axes, &B_axes); ComputeDivAGradientCUDA<TGrad, TIn>( C_broadcast_dims, B_broadcast_dims, A_axes, dC, B, dA, context); ComputeDivBGradientCUDA<TGrad, TIn, TOut>( C_broadcast_dims, B_axes, dC, B, C, dB, context); return true; } REGISTER_CUDA_OPERATOR( Div, BinaryElementwiseOp<NumericTypes, CUDAContext, DivFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( DivGradient, BinaryElementwiseGradientOp< NumericTypes, CUDAContext, DivFunctor<CUDAContext>>); } // namespace caffe2
c87e2b2b286853419c174afc6a9c495cabc6dabd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_CUDNN #include <vector> #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int* kernel_shape_data = this->kernel_shape_.cpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); size_t workspace_limit_bytes = this->channels_ * sizeof(int); for(int axes_idx = 0; axes_idx < this->num_spatial_axes_; axes_idx ++){ workspace_limit_bytes *= kernel_shape_data[axes_idx]; } workspace_limit_bytes += 1; // Forward through cuDNN in parallel over groups. 
for (int g = 0; g < this->group_; g++) { cudnnConvolutionFwdAlgo_t algo; // pick the convolution algorithm // TODO(shelhamer) this should be done during reshape // TODO(shelhamer) the choice of automatic or manual algorithm picking // should be exposed in proto CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_limit_bytes, // memoryLimitInBytes, &algo)); // get minimum size of the workspace needed for the desired algorithm size_t workspaceSizeInBytes_temp = 0; CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g], bottom_descs_[i], filter_desc_, conv_descs_[i], top_descs_[i], algo, &workspaceSizeInBytes_temp)); if (workspaceSizeInBytes_temp > workspaceSizeInBytes) { workspaceSizeInBytes = workspaceSizeInBytes_temp; // free the existing workspace and allocate a new (larger) one hipFree(this->workspace); hipError_t err = hipMalloc(&(this->workspace), workspaceSizeInBytes); if (err != hipSuccess) { // force zero memory path algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM; workspace = NULL; workspaceSizeInBytes = 0; } } // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + weight_offset_ * g, conv_descs_[i], algo, workspace, workspaceSizeInBytes, cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. 
if (this->bias_term_ && (this->num_spatial_axes_ == 2)) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // bias for non 2D-images if (this->bias_term_ && (this->num_spatial_axes_ != 2)) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); for (int n = 0; n < this->num_; ++n) { this->forward_gpu_bias(top_data + n * this->top_dim_, bias_data); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; Dtype* bias_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. 
if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0, 0, 0, cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + weight_offset_ * g)); } // Gradient w.r.t. bottom data. if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], CUDNN_CONVOLUTION_BWD_DATA_ALGO_0, 0, 0, cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
c87e2b2b286853419c174afc6a9c495cabc6dabd.cu
#ifdef USE_CUDNN
#include <vector>

#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

// Empty kernel launched into the default (null) stream; because the legacy
// null stream synchronizes with all other streams, this acts as a barrier
// for the per-group work issued on the cuDNN handles' streams.
__global__ void sync_conv_groups() { }

// Forward pass through cuDNN convolution.
// For each bottom/top pair, runs one cudnnConvolutionForward per group
// (groups are dispatched on separate handles/streams), adds bias, then
// synchronizes all groups via sync_conv_groups.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int* kernel_shape_data = this->kernel_shape_.cpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* top_data = top[i]->mutable_gpu_data();

    // Heuristic upper bound for the algorithm-selection workspace:
    // channels * prod(kernel dims) * sizeof(int), plus 1 so the limit is
    // never exactly zero.
    size_t workspace_limit_bytes = this->channels_ * sizeof(int);
    for (int axes_idx = 0; axes_idx < this->num_spatial_axes_; axes_idx++) {
      workspace_limit_bytes *= kernel_shape_data[axes_idx];
    }
    workspace_limit_bytes += 1;

    // Forward through cuDNN in parallel over groups.
    for (int g = 0; g < this->group_; g++) {
      cudnnConvolutionFwdAlgo_t algo;

      // pick the convolution algorithm
      // TODO(shelhamer) this should be done during reshape
      // TODO(shelhamer) the choice of automatic or manual algorithm picking
      // should be exposed in proto
      // NOTE(review): cudnnGetConvolutionForwardAlgorithm was deprecated in
      // cuDNN 7 and removed in cuDNN 8; newer code should use
      // cudnnGetConvolutionForwardAlgorithm_v7 / cudnnFindConvolutionForwardAlgorithm.
      CUDNN_CHECK(cudnnGetConvolutionForwardAlgorithm(handle_[g],
          bottom_descs_[i],
          filter_desc_,
          conv_descs_[i],
          top_descs_[i],
          CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
          workspace_limit_bytes,  // memoryLimitInBytes,
          &algo));

      // get minimum size of the workspace needed for the desired algorithm
      size_t workspaceSizeInBytes_temp = 0;

      CUDNN_CHECK(cudnnGetConvolutionForwardWorkspaceSize(handle_[g],
          bottom_descs_[i],
          filter_desc_,
          conv_descs_[i],
          top_descs_[i],
          algo,
          &workspaceSizeInBytes_temp));

      // Grow the cached workspace if this algorithm needs more than what is
      // currently allocated.
      if (workspaceSizeInBytes_temp > workspaceSizeInBytes) {
        workspaceSizeInBytes = workspaceSizeInBytes_temp;
        // free the existing workspace and allocate a new (larger) one
        // NOTE(review): cudaFree's return value is ignored here; a sticky
        // prior error would surface on the following cudaMalloc instead.
        cudaFree(this->workspace);
        cudaError_t err = cudaMalloc(&(this->workspace), workspaceSizeInBytes);
        if (err != cudaSuccess) {
          // force zero memory path: IMPLICIT_GEMM requires no workspace.
          algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM;
          workspace = NULL;
          workspaceSizeInBytes = 0;
        }
      }

      // Filters.
      // NOTE(review): a single workspace buffer is shared by all groups,
      // which run on separate streams per the synchronization comment below
      // — verify the chosen algorithms don't race on it.
      CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
          cudnn::dataType<Dtype>::one,
          bottom_descs_[i], bottom_data + bottom_offset_ * g,
          filter_desc_, weight + weight_offset_ * g,
          conv_descs_[i],
          algo, workspace, workspaceSizeInBytes,
          cudnn::dataType<Dtype>::zero,
          top_descs_[i], top_data + top_offset_ * g));

      // Bias.
      // 2D case: cuDNN can add the bias tensor directly.
      if (this->bias_term_ && (this->num_spatial_axes_ == 2)) {
        const Dtype* bias_data = this->blobs_[1]->gpu_data();
        CUDNN_CHECK(cudnnAddTensor(handle_[g],
            cudnn::dataType<Dtype>::one,
            bias_desc_, bias_data + bias_offset_ * g,
            cudnn::dataType<Dtype>::one,
            top_descs_[i], top_data + top_offset_ * g));
      }
    }
    // bias for non 2D-images: fall back to Caffe's own GEMM-based bias add,
    // applied once per image in the batch.
    if (this->bias_term_ && (this->num_spatial_axes_ != 2)) {
      const Dtype* bias_data = this->blobs_[1]->gpu_data();
      for (int n = 0; n < this->num_; ++n) {
        this->forward_gpu_bias(top_data + n * this->top_dim_, bias_data);
      }
    }

    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    sync_conv_groups<<<1, 1>>>();
  }
}

// Backward pass: accumulates bias, weight, and bottom-data gradients.
// Each gradient type for each group gets its own handle (indexed
// 0/1/2 * group_ + g) so the three backward calls can overlap in streams.
// Hard-coded ALGO_0 variants with zero workspace are used throughout.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = NULL;
  Dtype* weight_diff = NULL;
  Dtype* bias_diff = NULL;
  if (this->param_propagate_down_[0]) {
    weight = this->blobs_[0]->gpu_data();
    weight_diff = this->blobs_[0]->mutable_gpu_diff();
    // Gradients are zeroed here and accumulated across all top blobs below.
    caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff);
  }
  if (this->bias_term_ && this->param_propagate_down_[1]) {
    bias_diff = this->blobs_[1]->mutable_gpu_diff();
    caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff);
  }
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    // Backward through cuDNN in parallel over groups and gradients.
    for (int g = 0; g < this->group_; g++) {
      // Gradient w.r.t. bias (accumulated: beta = one).
      if (this->bias_term_ && this->param_propagate_down_[1]) {
        CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
            cudnn::dataType<Dtype>::one,
            top_descs_[i], top_diff + top_offset_ * g,
            cudnn::dataType<Dtype>::one,
            bias_desc_, bias_diff + bias_offset_ * g));
      }
      // Gradient w.r.t. weights (accumulated: beta = one).
      if (this->param_propagate_down_[0]) {
        const Dtype* bottom_data = bottom[i]->gpu_data();
        CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g],
            cudnn::dataType<Dtype>::one,
            bottom_descs_[i], bottom_data + bottom_offset_ * g,
            top_descs_[i], top_diff + top_offset_ * g,
            conv_descs_[i],
            CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0,
            0, 0,
            cudnn::dataType<Dtype>::one,
            filter_desc_, weight_diff + weight_offset_ * g));
      }
      // Gradient w.r.t. bottom data (overwritten: beta = zero).
      if (propagate_down[i]) {
        // Weights may not have been fetched above if param gradients are off.
        if (weight == NULL) {
          weight = this->blobs_[0]->gpu_data();
        }
        Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
        CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
            cudnn::dataType<Dtype>::one,
            filter_desc_, weight + weight_offset_ * g,
            top_descs_[i], top_diff + top_offset_ * g,
            conv_descs_[i],
            CUDNN_CONVOLUTION_BWD_DATA_ALGO_0,
            0, 0,
            cudnn::dataType<Dtype>::zero,
            bottom_descs_[i], bottom_diff + bottom_offset_ * g));
      }
    }
    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    sync_conv_groups<<<1, 1>>>();
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);

}  // namespace caffe
#endif
1ceb56e7ed630e91777bcb65362e8385d02504eb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"

#include <rocm_smi/rocm_smi.h>
#include <assert.h>

#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
// NOTE(review): valid only for power-of-two y (bitmask trick).
#define mod(x,y) ( (x) & (y-1))

#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif

void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];

/* Device code Begin */
// FORMA-generated stencil kernel: applies a weighted 5-point row smoother
// (weights 5,12,15,12,5 scaled by 1/118) four times in a software pipeline.
// Four shared-memory row tiles (__tilevar_2..5__, FORMA_BLOCKDIM_X floats
// each, carved out of dynamic shared memory) hold one row per stage, with
// the rows above/below kept in per-thread registers (b*/t*).  Blocks overlap
// by 8 columns in x (halo for 4 stages x 1-wide stencil on each side);
// __var_1__ receives the final stage's output.
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
  int __FORMA_SHARED_MEM_OFFSET__ = 0;
  float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
  float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
  float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
  float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
  // t* = row below current, b* = row above current, for each pipeline stage.
  float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
  float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
  int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8);
  int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y)-4, 0);
  // Initialize the values: load the first row into the stage-1 tile and the
  // next row into the register below it.
  int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
  if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
    __tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__];
    t2 = input[__iter_3__+M*(__iter_y__+1)];
  }
  // Initial computation: warm up the 4-stage pipeline (no output yet).
  // NOTE(review): guards use bitwise '&' between boolean subexpressions —
  // works here because both sides are 0/1, but '&&' is the conventional form.
  for (int __iter_1__ = FORMA_MAX(1,__iter_y__+1); __iter_1__ < FORMA_MIN(N-1,__iter_y__+7); __iter_1__++) {
    // Advance stage-1 row window: shift up and load the next input row.
    if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
      b2 = __tilevar_2__[__iter_3__-__iter_0__];
      __tilevar_2__[__iter_3__-__iter_0__] = t2;
      t2 = input[__iter_3__+M*(__iter_1__+1)];
    }
    __syncthreads();
    // Stage 1: smooth tile 2 into the stage-2 window (tile 3).
    if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
      float __temp_2__ = b2;
      float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
      float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
      float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
      float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
      float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
      float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
      float __temp_17__ = t2;
      float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
      float __temp_19__ = (__temp_18__ / 118);
      b3 = __tilevar_3__[__iter_3__-__iter_0__];
      __tilevar_3__[__iter_3__-__iter_0__] = t3;
      t3 = __temp_19__;
    }
    __syncthreads();
    // Stage 2: smooth tile 3 into the stage-3 window (tile 4).
    if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
      float __temp_32__ = b3;
      float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]);
      float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
      float __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]);
      float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
      float __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
      float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
      float __temp_47__ = t3;
      float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
      float __temp_49__ = (__temp_48__ / 118);
      b4 = __tilevar_4__[__iter_3__-__iter_0__];
      __tilevar_4__[__iter_3__-__iter_0__] = t4;
      t4 = __temp_49__;
    }
    __syncthreads ();
    // Stage 3: smooth tile 4 into the stage-4 window (tile 5).
    if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
      float __temp_60__ = b4;
      float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]);
      float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
      float __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]);
      float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
      float __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]);
      float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
      float __temp_67__ = t4;
      float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
      float __temp_69__ = (__temp_68__ / 118);
      b5 = __tilevar_5__[__iter_3__-__iter_0__];
      __tilevar_5__[__iter_3__-__iter_0__] = t5;
      t5 = __temp_69__;
    }
  }
  // Rest of the computation: steady state — all four stages plus output.
  for (int __iter_1__ = FORMA_MAX(1,__iter_y__+7); __iter_1__ < FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+8); __iter_1__++) {
    if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
      b2 = __tilevar_2__[__iter_3__-__iter_0__];
      __tilevar_2__[__iter_3__-__iter_0__] = t2;
      t2 = input[__iter_3__+M*(__iter_1__+1)];
    }
    __syncthreads();
    if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
      float __temp_2__ = b2;
      float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
      float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
      float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
      float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
      float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
      float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
      float __temp_17__ = t2;
      float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
      float __temp_19__ = (__temp_18__ / 118);
      b3 = __tilevar_3__[__iter_3__-__iter_0__];
      __tilevar_3__[__iter_3__-__iter_0__] = t3;
      t3 = __temp_19__;
    }
    __syncthreads();
    if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
      float __temp_32__ = b3;
      float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]);
      float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
      float __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]);
      float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
      float __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
      float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
      float __temp_47__ = t3;
      float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
      float __temp_49__ = (__temp_48__ / 118);
      b4 = __tilevar_4__[__iter_3__-__iter_0__];
      __tilevar_4__[__iter_3__-__iter_0__] = t4;
      t4 = __temp_49__;
    }
    __syncthreads ();
    if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
      float __temp_60__ = b4;
      float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]);
      float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
      float __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]);
      float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
      float __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]);
      float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
      float __temp_67__ = t4;
      float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
      float __temp_69__ = (__temp_68__ / 118);
      b5 = __tilevar_5__[__iter_3__-__iter_0__];
      __tilevar_5__[__iter_3__-__iter_0__] = t5;
      t5 = __temp_69__;
    }
    __syncthreads ();
    // Stage 4: final smoothing pass writes the output row (shifted back by
    // the 3-row pipeline latency).
    if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
      float __temp_80__ = b5;
      float __temp_81__ = (__tilevar_5__[__iter_3__+(-1)-__iter_0__]);
      float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__);
      float __temp_83__ = (__tilevar_5__[__iter_3__-__iter_0__]);
      float __temp_84__ = (__temp_82__ + 15 * __temp_83__);
      float __temp_85__ = (__tilevar_5__[__iter_3__+1-__iter_0__]);
      float __temp_86__ = (__temp_84__ + 12 * __temp_85__);
      float __temp_87__ = t5;
      float __temp_88__ = (__temp_86__ + 5 * __temp_87__);
      float __temp_89__ = (__temp_88__ / 118);
      __var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__;
    }
  }
}

// Dynamic shared memory requirement for the kernel above: 4 float tiles of
// blockDim.x elements each.
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
  int FORMA_BLOCKDIM_X = (int)(blockDim.x);
  int SMemSize = 0;
  SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X);
  return SMemSize;
}
/*Device code End */

/* Host Code Begin */
// Host driver: allocates device buffers, copies h_input (host or device
// pointer) in, launches the stencil kernel 1000 times while sampling GPU
// power, and copies the result to __var_0__.
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
  float * input;
  hipMalloc(&input,sizeof(float)*((N)*(M)));
  Check_CUDA_Error("Allocation Error!! : input\n");
  // Detect whether h_input is already a device pointer to pick the memcpy kind.
  hipPointerAttribute_t ptrAttrib_h_input;
  hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
  if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
    if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
      memcpy_kind_h_input = hipMemcpyDeviceToDevice;
  hipGetLastError();  // clear any error from probing a plain host pointer
  if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
    hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
  }
  float * __var_1__;
  hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
  Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
  int __FORMA_MAX_SHARED_MEM__;
  hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
  hipEvent_t _forma_timer_start_,_forma_timer_stop_;
  hipEventCreate(&_forma_timer_start_);
  hipEventCreate(&_forma_timer_stop_);
  hipEventRecord(_forma_timer_start_,0);
#endif
  int __size_0___kernel___forma_kernel__0__ = M;
  int __size_1___kernel___forma_kernel__0__ = N;
  int __block_0___kernel___forma_kernel__0__ = 128;
  int __block_1___kernel___forma_kernel__0__ = 1;
  dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
  int __SMemSize___kernel___forma_kernel__0__ = 0;
  __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
  // Grid: x covers M with an 8-column overlap per block; y splits N into 64
  // slabs (N/64 rows each, passed to the kernel as FORMA_BLOCKDIM_Y).
  int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
  int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, __size_1___kernel___forma_kernel__0__/64);
  dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
  // NOTE(review): hipify left this power-measurement section half translated:
  // it declares ROCm-SMI types (rsmi_status_t, uint32_t device) and includes
  // <rocm_smi/rocm_smi.h>, yet still calls the NVIDIA NVML API (nvmlInit,
  // nvmlDeviceGetHandleByIndex, nvmlDeviceGetPowerUsage, nvmlShutdown) and
  // the nvmlEnableState_t type. This cannot compile for a HIP/ROCm target;
  // it needs a real port to rsmi_init / rsmi_dev_power_ave_get (microwatts)
  // or removal of the instrumentation.
  unsigned int power1, power2;
  rsmi_status_t result;
  uint32_t device;
  nvmlEnableState_t mode;
  result=nvmlInit();
  result = nvmlDeviceGetHandleByIndex(0, &device);
  assert(RSMI_STATUS_SUCCESS == result);
  result=nvmlDeviceGetPowerManagementMode(device, &mode);
  printf("enabled = %d\n", mode);
  result=nvmlDeviceGetPowerUsage(device,&power1);
  assert(RSMI_STATUS_SUCCESS == result);
  hipDeviceSynchronize();
  // Launch 1000 iterations so the power sample integrates over real work.
  for (int x=0; x<1000; x++) {
    hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0,  input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_1___kernel___forma_kernel__0__/64, __var_1__);
    Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
  }
  hipDeviceSynchronize();
  result=nvmlDeviceGetPowerUsage(device,&power2);
  assert(RSMI_STATUS_SUCCESS == result);
  power2 -= power1;
  printf("%u\n", power2);
  nvmlShutdown();
  // Copy the result out; __var_0__ may itself be a device pointer.
  hipPointerAttribute_t ptrAttrib___var_0__;
  hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
  if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
    if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
      memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
  hipGetLastError();
  hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
  hipEventRecord(_forma_timer_stop_,0);
  hipEventSynchronize(_forma_timer_stop_);
  float elapsedTime;
  hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
  printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
  hipEventDestroy(_forma_timer_start_);
  hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
  hipFree(input);
  hipFree(__var_1__);
}
/*Host Free End*/
1ceb56e7ed630e91777bcb65362e8385d02504eb.cu
#include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"

#include <nvml.h>
#include <assert.h>

#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
// NOTE(review): valid only for power-of-two y (bitmask trick).
#define mod(x,y) ( (x) & (y-1))

#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif

void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];

/* Device code Begin */
// FORMA-generated stencil kernel: applies a weighted 5-point row smoother
// (weights 5,12,15,12,5 scaled by 1/118) four times in a software pipeline.
// Four shared-memory row tiles (__tilevar_2..5__, FORMA_BLOCKDIM_X floats
// each, carved out of dynamic shared memory) hold one row per stage, with
// the rows above/below kept in per-thread registers (b*/t*).  Blocks overlap
// by 8 columns in x (halo for 4 stages x 1-wide stencil on each side);
// __var_1__ receives the final stage's output.
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
  int __FORMA_SHARED_MEM_OFFSET__ = 0;
  float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
  float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
  float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
  float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
  __FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
  // t* = row below current, b* = row above current, for each pipeline stage.
  float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
  float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
  int __iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X-8);
  int __iter_y__ = FORMA_MAX((int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y)-4, 0);
  // Initialize the values: load the first row into the stage-1 tile and the
  // next row into the register below it.
  int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
  if (__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))) {
    __tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*__iter_y__];
    t2 = input[__iter_3__+M*(__iter_y__+1)];
  }
  // Initial computation: warm up the 4-stage pipeline (no output yet).
  // NOTE(review): guards use bitwise '&' between boolean subexpressions —
  // works here because both sides are 0/1, but '&&' is the conventional form.
  for (int __iter_1__ = FORMA_MAX(1,__iter_y__+1); __iter_1__ < FORMA_MIN(N-1,__iter_y__+7); __iter_1__++) {
    // Advance stage-1 row window: shift up and load the next input row.
    if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
      b2 = __tilevar_2__[__iter_3__-__iter_0__];
      __tilevar_2__[__iter_3__-__iter_0__] = t2;
      t2 = input[__iter_3__+M*(__iter_1__+1)];
    }
    __syncthreads();
    // Stage 1: smooth tile 2 into the stage-2 window (tile 3).
    if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
      float __temp_2__ = b2;
      float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
      float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
      float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
      float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
      float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
      float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
      float __temp_17__ = t2;
      float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
      float __temp_19__ = (__temp_18__ / 118);
      b3 = __tilevar_3__[__iter_3__-__iter_0__];
      __tilevar_3__[__iter_3__-__iter_0__] = t3;
      t3 = __temp_19__;
    }
    __syncthreads();
    // Stage 2: smooth tile 3 into the stage-3 window (tile 4).
    if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
      float __temp_32__ = b3;
      float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]);
      float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
      float __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]);
      float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
      float __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
      float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
      float __temp_47__ = t3;
      float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
      float __temp_49__ = (__temp_48__ / 118);
      b4 = __tilevar_4__[__iter_3__-__iter_0__];
      __tilevar_4__[__iter_3__-__iter_0__] = t4;
      t4 = __temp_49__;
    }
    __syncthreads ();
    // Stage 3: smooth tile 4 into the stage-4 window (tile 5).
    if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
      float __temp_60__ = b4;
      float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]);
      float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
      float __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]);
      float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
      float __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]);
      float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
      float __temp_67__ = t4;
      float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
      float __temp_69__ = (__temp_68__ / 118);
      b5 = __tilevar_5__[__iter_3__-__iter_0__];
      __tilevar_5__[__iter_3__-__iter_0__] = t5;
      t5 = __temp_69__;
    }
  }
  // Rest of the computation: steady state — all four stages plus output.
  for (int __iter_1__ = FORMA_MAX(1,__iter_y__+7); __iter_1__ < FORMA_MIN(N-1,__iter_y__+FORMA_BLOCKDIM_Y+8); __iter_1__++) {
    if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
      b2 = __tilevar_2__[__iter_3__-__iter_0__];
      __tilevar_2__[__iter_3__-__iter_0__] = t2;
      t2 = input[__iter_3__+M*(__iter_1__+1)];
    }
    __syncthreads();
    if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
      float __temp_2__ = b2;
      float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
      float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
      float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
      float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
      float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
      float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
      float __temp_17__ = t2;
      float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
      float __temp_19__ = (__temp_18__ / 118);
      b3 = __tilevar_3__[__iter_3__-__iter_0__];
      __tilevar_3__[__iter_3__-__iter_0__] = t3;
      t3 = __temp_19__;
    }
    __syncthreads();
    if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
      float __temp_32__ = b3;
      float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]);
      float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
      float __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]);
      float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
      float __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
      float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
      float __temp_47__ = t3;
      float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
      float __temp_49__ = (__temp_48__ / 118);
      b4 = __tilevar_4__[__iter_3__-__iter_0__];
      __tilevar_4__[__iter_3__-__iter_0__] = t4;
      t4 = __temp_49__;
    }
    __syncthreads ();
    if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
      float __temp_60__ = b4;
      float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]);
      float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
      float __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]);
      float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
      float __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]);
      float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
      float __temp_67__ = t4;
      float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
      float __temp_69__ = (__temp_68__ / 118);
      b5 = __tilevar_5__[__iter_3__-__iter_0__];
      __tilevar_5__[__iter_3__-__iter_0__] = t5;
      t5 = __temp_69__;
    }
    __syncthreads ();
    // Stage 4: final smoothing pass writes the output row (shifted back by
    // the 3-row pipeline latency).
    if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
      float __temp_80__ = b5;
      float __temp_81__ = (__tilevar_5__[__iter_3__+(-1)-__iter_0__]);
      float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__);
      float __temp_83__ = (__tilevar_5__[__iter_3__-__iter_0__]);
      float __temp_84__ = (__temp_82__ + 15 * __temp_83__);
      float __temp_85__ = (__tilevar_5__[__iter_3__+1-__iter_0__]);
      float __temp_86__ = (__temp_84__ + 12 * __temp_85__);
      float __temp_87__ = t5;
      float __temp_88__ = (__temp_86__ + 5 * __temp_87__);
      float __temp_89__ = (__temp_88__ / 118);
      __var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__;
    }
  }
}

// Dynamic shared memory requirement for the kernel above: 4 float tiles of
// blockDim.x elements each.
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
  int FORMA_BLOCKDIM_X = (int)(blockDim.x);
  int SMemSize = 0;
  SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X);
  return SMemSize;
}
/*Device code End */

/* Host Code Begin */
// Host driver: allocates device buffers, copies h_input (host or device
// pointer) in, launches the stencil kernel 1000 times while sampling GPU
// power via NVML, and copies the result to __var_0__.
// NOTE(review): most CUDA/NVML calls here are unchecked apart from the
// asserts on the NVML results; errors from cudaMalloc/cudaMemcpy would go
// unnoticed beyond Check_CUDA_Error.
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
  float * input;
  cudaMalloc(&input,sizeof(float)*((N)*(M)));
  Check_CUDA_Error("Allocation Error!! : input\n");
  // Detect whether h_input is already a device pointer to pick the memcpy kind.
  // NOTE(review): cudaPointerAttributes::memoryType is deprecated since
  // CUDA 10 in favor of the `type` field.
  cudaPointerAttributes ptrAttrib_h_input;
  cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
  if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
    if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
      memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
  cudaGetLastError();  // clear any error from probing a plain host pointer
  if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
    cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
  }
  float * __var_1__;
  cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
  Check_CUDA_Error("Allocation Error!! : __var_1__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
  int __FORMA_MAX_SHARED_MEM__;
  cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
  cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
  cudaEventCreate(&_forma_timer_start_);
  cudaEventCreate(&_forma_timer_stop_);
  cudaEventRecord(_forma_timer_start_,0);
#endif
  int __size_0___kernel___forma_kernel__0__ = M;
  int __size_1___kernel___forma_kernel__0__ = N;
  int __block_0___kernel___forma_kernel__0__ = 128;
  int __block_1___kernel___forma_kernel__0__ = 1;
  dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
  int __SMemSize___kernel___forma_kernel__0__ = 0;
  __SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
  // Grid: x covers M with an 8-column overlap per block; y splits N into 64
  // slabs (N/64 rows each, passed to the kernel as FORMA_BLOCKDIM_Y).
  int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x-8);
  int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__, __size_1___kernel___forma_kernel__0__/64);
  dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
  // Power instrumentation: sample NVML power (milliwatts) before and after
  // the launch loop and report the difference.
  unsigned int power1, power2;
  nvmlReturn_t result;
  nvmlDevice_t device;
  nvmlEnableState_t mode;
  result=nvmlInit();
  result = nvmlDeviceGetHandleByIndex(0, &device);
  assert(NVML_SUCCESS == result);
  result=nvmlDeviceGetPowerManagementMode(device, &mode);
  printf("enabled = %d\n", mode);
  result=nvmlDeviceGetPowerUsage(device,&power1);
  assert(NVML_SUCCESS == result);
  cudaDeviceSynchronize();
  // Launch 1000 iterations so the power sample integrates over real work.
  for (int x=0; x<1000; x++) {
    __kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __blockConfig___kernel___forma_kernel__0__.x, __size_1___kernel___forma_kernel__0__/64, __var_1__);
    Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
  }
  cudaDeviceSynchronize();
  result=nvmlDeviceGetPowerUsage(device,&power2);
  assert(NVML_SUCCESS == result);
  power2 -= power1;
  printf("%u\n", power2);
  nvmlShutdown();
  // Copy the result out; __var_0__ may itself be a device pointer.
  cudaPointerAttributes ptrAttrib___var_0__;
  cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
  if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
    if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
      memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
  cudaGetLastError();
  cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
  cudaEventRecord(_forma_timer_stop_,0);
  cudaEventSynchronize(_forma_timer_stop_);
  float elapsedTime;
  cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
  printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
  cudaEventDestroy(_forma_timer_start_);
  cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
  cudaFree(input);
  cudaFree(__var_1__);
}
/*Host Free End*/
60d65245cbb6ade0e9c6454f6b580f6f9519a3b9.hip
// !!! This is a file automatically generated by hipify!!! // A C++ program for Dijkstra's shortest path algorithm. #include <bits/stdc++.h> #include <chrono> #include <fstream> #include <omp.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> using namespace std; using namespace std::chrono; #define INF 2000000000 const string fin_str = "../matlab/gr_optimal_control_3rd_order.csv"; typedef pair<int, int> iPair; __global__ void bf(int n, int u, int const* d_weights, int* d_dist, bool* d_has_change, int* came_from) { int v = blockIdx.x * blockDim.x + threadIdx.x; if(v < n) { d_has_change[v] = false; int weight = d_weights[u * n + v]; if (weight < INF) { if (d_dist[v] > d_dist[u] + weight) { d_dist[v] = d_dist[u] + weight; d_has_change[v] = true; came_from[v] = u; } } } } //translate 2-dimension coordinate to 1-dimension int convert_dimension_2D_1D(int x, int y, int n) { return x * n + y; } // The main function that finds shortest distances void Dijkstra(int src, int goal, int n, int h_weights[]) { dim3 threadsPerBlock = 256; dim3 blocksPerGrid = ((n + threadsPerBlock.x - 1) / threadsPerBlock.x); // host int *h_dist = (int *)calloc(sizeof(int), n); int *h_came_from = (int *)calloc(sizeof(int), n); bool *h_has_change = (bool *)calloc(sizeof(bool), n); for (int i=0; i<n; i++) { h_dist[i] = INF; h_came_from[i] = INF; } h_dist[src] = 0; h_came_from[src] = src; // device int* d_weights; int* d_dist; int* d_came_from; bool* d_has_change; hipMalloc(&d_weights, n * n * sizeof(int)); hipMalloc(&d_dist, n * sizeof(int)); hipMalloc(&d_came_from, n * sizeof(int)); hipMalloc(&d_has_change, n * sizeof(bool)); // copy host to device hipMemcpy(d_weights, h_weights, n * n * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_dist, h_dist, n * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_came_from, h_came_from, n * sizeof(int), hipMemcpyHostToDevice); priority_queue< iPair, vector <iPair> , greater<iPair> > pq; pq.push(make_pair(0, src)); int 
counter = 0; // main loop auto start = high_resolution_clock::now(); while(!pq.empty()) { int u = pq.top().second; pq.pop(); if(u == goal) { break; } counter++; // invoke kernel hipLaunchKernelGGL(( bf) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, u, d_weights, d_dist, d_has_change, d_came_from); hipMemcpy(h_has_change, d_has_change, n * sizeof(bool), hipMemcpyDeviceToHost); hipMemcpy(h_dist, d_dist, sizeof(int) * n, hipMemcpyDeviceToHost); for (int i = 0; i < n; i++) { if (h_has_change[i]) { pq.push(make_pair(h_dist[i], i)); } } } auto stop = high_resolution_clock::now(); cout << "counter: " << counter << "\n"; hipMemcpy(h_came_from, d_came_from, n * sizeof(int), hipMemcpyDeviceToHost); hipFree(d_weights); hipFree(d_dist); hipFree(d_came_from); hipFree(d_has_change); // Print shortest distances stored in dist[] ofstream myfile ("dijkstra.txt"); if (myfile.is_open()) { for (int i = 0; i < n; ++i) myfile << i << "\t\t" << h_dist[i] <<"\n"; myfile.close(); } else cout << "Unable to open file"; ofstream myfile_path ("dijkstra_path.txt"); if (myfile_path.is_open()) { vector<int> path; int current = goal; while(current != src) { path.push_back(current); current = h_came_from[current]; } path.push_back(src); reverse(path.begin(), path.end()); for (vector<int>::iterator i = path.begin(); i < path.end(); ++i) { myfile_path << *i << "\t\t"; } myfile_path.close(); int total = 0; for (vector<int>::iterator i = path.begin(); i < path.end()-1;) { int u = *i; int v = *(++i); int weight = h_weights[convert_dimension_2D_1D(u, v, n)]; total += weight; cout << "u: " << u << ", v: " << v << ", weight: " << weight << "\n"; } cout << "total: " << total <<"\n"; } else cout << "Unable to open file"; auto duration = duration_cast<milliseconds>(stop - start); cout << "duration :" << duration.count() << endl; } void create_weights(int weights[], int n) { for (int i = 0; i < n * n; i++) { weights[i] = INF; } fstream fin; fin.open(fin_str, ios::in); vector<int> row; string line, 
word; getline(fin,line); while (!fin.eof()) { row.clear(); getline(fin, line); stringstream s(line); while (getline(s, word, ',')) { row.push_back(stoi(word)); } weights[convert_dimension_2D_1D(row[0]-1, row[1]-1, n)] = row[2]; weights[convert_dimension_2D_1D(row[1]-1, row[0]-1, n)] = row[2]; } fin.close(); } // Driver program to test above functions int main() { int N = 16456; int* mat = (int *)malloc(N * N * sizeof(int)); create_weights(mat, N); // for (int i=0; i< N*N; i++) // { // cout << mat[i] << " "; // } Dijkstra(0, 2324, N, mat); return 0; }
60d65245cbb6ade0e9c6454f6b580f6f9519a3b9.cu
// A C++ program for Dijkstra's shortest path algorithm. #include <bits/stdc++.h> #include <chrono> #include <fstream> #include <omp.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> using namespace std; using namespace std::chrono; #define INF 2000000000 const string fin_str = "../matlab/gr_optimal_control_3rd_order.csv"; typedef pair<int, int> iPair; __global__ void bf(int n, int u, int const* d_weights, int* d_dist, bool* d_has_change, int* came_from) { int v = blockIdx.x * blockDim.x + threadIdx.x; if(v < n) { d_has_change[v] = false; int weight = d_weights[u * n + v]; if (weight < INF) { if (d_dist[v] > d_dist[u] + weight) { d_dist[v] = d_dist[u] + weight; d_has_change[v] = true; came_from[v] = u; } } } } //translate 2-dimension coordinate to 1-dimension int convert_dimension_2D_1D(int x, int y, int n) { return x * n + y; } // The main function that finds shortest distances void Dijkstra(int src, int goal, int n, int h_weights[]) { dim3 threadsPerBlock = 256; dim3 blocksPerGrid = ((n + threadsPerBlock.x - 1) / threadsPerBlock.x); // host int *h_dist = (int *)calloc(sizeof(int), n); int *h_came_from = (int *)calloc(sizeof(int), n); bool *h_has_change = (bool *)calloc(sizeof(bool), n); for (int i=0; i<n; i++) { h_dist[i] = INF; h_came_from[i] = INF; } h_dist[src] = 0; h_came_from[src] = src; // device int* d_weights; int* d_dist; int* d_came_from; bool* d_has_change; cudaMalloc(&d_weights, n * n * sizeof(int)); cudaMalloc(&d_dist, n * sizeof(int)); cudaMalloc(&d_came_from, n * sizeof(int)); cudaMalloc(&d_has_change, n * sizeof(bool)); // copy host to device cudaMemcpy(d_weights, h_weights, n * n * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_dist, h_dist, n * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_came_from, h_came_from, n * sizeof(int), cudaMemcpyHostToDevice); priority_queue< iPair, vector <iPair> , greater<iPair> > pq; pq.push(make_pair(0, src)); int counter = 0; // main loop auto start = 
high_resolution_clock::now(); while(!pq.empty()) { int u = pq.top().second; pq.pop(); if(u == goal) { break; } counter++; // invoke kernel bf <<<blocksPerGrid, threadsPerBlock>>>(n, u, d_weights, d_dist, d_has_change, d_came_from); cudaMemcpy(h_has_change, d_has_change, n * sizeof(bool), cudaMemcpyDeviceToHost); cudaMemcpy(h_dist, d_dist, sizeof(int) * n, cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++) { if (h_has_change[i]) { pq.push(make_pair(h_dist[i], i)); } } } auto stop = high_resolution_clock::now(); cout << "counter: " << counter << "\n"; cudaMemcpy(h_came_from, d_came_from, n * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_weights); cudaFree(d_dist); cudaFree(d_came_from); cudaFree(d_has_change); // Print shortest distances stored in dist[] ofstream myfile ("dijkstra.txt"); if (myfile.is_open()) { for (int i = 0; i < n; ++i) myfile << i << "\t\t" << h_dist[i] <<"\n"; myfile.close(); } else cout << "Unable to open file"; ofstream myfile_path ("dijkstra_path.txt"); if (myfile_path.is_open()) { vector<int> path; int current = goal; while(current != src) { path.push_back(current); current = h_came_from[current]; } path.push_back(src); reverse(path.begin(), path.end()); for (vector<int>::iterator i = path.begin(); i < path.end(); ++i) { myfile_path << *i << "\t\t"; } myfile_path.close(); int total = 0; for (vector<int>::iterator i = path.begin(); i < path.end()-1;) { int u = *i; int v = *(++i); int weight = h_weights[convert_dimension_2D_1D(u, v, n)]; total += weight; cout << "u: " << u << ", v: " << v << ", weight: " << weight << "\n"; } cout << "total: " << total <<"\n"; } else cout << "Unable to open file"; auto duration = duration_cast<milliseconds>(stop - start); cout << "duration :" << duration.count() << endl; } void create_weights(int weights[], int n) { for (int i = 0; i < n * n; i++) { weights[i] = INF; } fstream fin; fin.open(fin_str, ios::in); vector<int> row; string line, word; getline(fin,line); while (!fin.eof()) { row.clear(); 
getline(fin, line); stringstream s(line); while (getline(s, word, ',')) { row.push_back(stoi(word)); } weights[convert_dimension_2D_1D(row[0]-1, row[1]-1, n)] = row[2]; weights[convert_dimension_2D_1D(row[1]-1, row[0]-1, n)] = row[2]; } fin.close(); } // Driver program to test above functions int main() { int N = 16456; int* mat = (int *)malloc(N * N * sizeof(int)); create_weights(mat, N); // for (int i=0; i< N*N; i++) // { // cout << mat[i] << " "; // } Dijkstra(0, 2324, N, mat); return 0; }
98a327a1adb372be8352e161302c9007c68e572d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void ReduceWKernelSimple(const uint8_t *src, float *dst, int width, int height) { int y = blockIdx.x * blockDim.x + threadIdx.x; int x = blockIdx.y * 128; if (y < height) { float sum = 0; for (int xend = min(x + 128, width); x < xend; ++x) { sum += src[x + y * width]; } atomicAdd(&dst[y], sum); } }
98a327a1adb372be8352e161302c9007c68e572d.cu
#include "includes.h" __global__ void ReduceWKernelSimple(const uint8_t *src, float *dst, int width, int height) { int y = blockIdx.x * blockDim.x + threadIdx.x; int x = blockIdx.y * 128; if (y < height) { float sum = 0; for (int xend = min(x + 128, width); x < xend; ++x) { sum += src[x + y * width]; } atomicAdd(&dst[y], sum); } }
3756b79fe572baaa6cd09f05e5e6a8a75294fae9.hip
// !!! This is a file automatically generated by hipify!!! /** CUDA, rendezvousParalel.cu Purpose: Calcular as variveis fsicas de um veculo espacial capazes de tornar o Rendezvous possvel @author Cssio Santos @version 1.0 31/07/17 */ /** Para compilaao: ex.: nvcc rendezvousParalel.cu -o rendezvous Para execuo: ex.: ./rendezvous v-0.005-0.006.dat v-0.007-0.008.dat */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "cuPrintf.hip" //Declarao de constantes utilizadas para o calculo dos coeficientes #define MI 398600.4418 #define EARTH_RADIUS 6378.0 #define N_PRECISION 10 using namespace std; /** * Calcular coeficiente A do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de A * @param X Chi - Varivel fsica Chi a ser calculado o valor de A * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de A * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de A * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de A * @returns O coeficinte A dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_A (int N, double x0, 
double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; int n; double aux; double sum = 0; result += (2*xl0)/w - 3*y0 +((2*vex)/w)*log((X+1)/X); //Calculo do somatorio for (n = 1; n <= N; n++) { aux = (1/(n*powf(X, n)))*(1/(1+powf(((n*Y)/w),2)))*(((2*vex)/w)+((n*Y*vey)/(w*w))); if (n%2 == 0) { sum -= aux; } else { sum += aux; } } result-= sum; return result; } /** * Calcular coeficiente B do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de B * @param X Chi - Varivel fsica Chi a ser calculado o valor de B * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de B * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de B * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de B * @returns O coeficinte B dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_B (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez){ double result = 0; double sum = 0; int n; double aux; result += yl0/w + (vey/w)*log((X+1)/X); 
//Calculo do somatorio for (n = 1; n <= N; n++) { aux = (1/(n*powf(X,n)))*(1/(1+powf(((n*Y)/w),2)))*(vey/w + (2*n*Y*vex)/(w*w)); if (n%2 == 0) {//iterao Par aux = -aux; } sum += aux; } result += sum; return result; } /** * Calcular o somatrio dos coeficientes Cn do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de C * @param X Chi - Varivel fsica Chi a ser calculado o valor de C * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de C * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de C * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de C * @returns O somatrio dos coeficintes Cn dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_C (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez){ double result = 0; int n; double aux; //Calculo do somatorio Cn for (n = 1; n <= N; n++) { aux = powf(n,a)*vex/(n*powf(X,n)*(1+powf((n*Y/w),2))) + n*Y*powf(n,a)*vey/(n*powf(X,n)*powf(w,2)*(1+powf((n*Y/w),2))); if (n%2 == 0) { aux = -aux; } result +=aux; } return result; } /** * Calcular coeficiente D 
do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de D * @param X Chi - Varivel fsica Chi a ser calculado o valor de D * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de D * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de D * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de D * @returns O coeficinte D dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_D (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; result -= (2*vex* log((X+1)/X))/w; result += 4*y0 - 2*xl0/w; return result; } /** * Calcular coeficiente E do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa 
inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de E * @param X Chi - Varivel fsica Chi a ser calculado o valor de E * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de E * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de E * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de E * @returns O coeficinte E dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_E (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; result -= 3*vex*log((X+1)/X); result += 6*w*y0 - 3*xl0; return result; } /** * Calcular o somatrio dos coeficientes Fn do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de Fn * @param X Chi - Varivel fsica Chi a ser calculado o valor de Fn * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex 
Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de Fn * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de Fn * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de Fn * @returns O somatrio coeficinte Fn dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_F(int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; double sum = 0; int n; double aux; //Calculo do somatorio for (n = 1; n <= N; n++) { aux = (1/(n*powf(X,n)))*((2*vey)/w + (4*vex)/(n*Y))/((1+powf((n*Y)/w,2))); if (n%2 == 0) { aux = - aux; } aux -= vex/(n*Y); aux *= powf(n,a); sum += aux; } result = sum; return result; } /** * Calcular coeficiente G do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de G * @param X Chi - Varivel fsica Chi a ser calculado o valor de G * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de G * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de G * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o 
valor de G * @returns O coeficinte G dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_G (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; double sum = 0; int n; double aux; result= 2*yl0/w + x0 + (2*vey*(log((X+1)/X)))/w; //Calculo do somatorio for (n = 1; n <= N; n++) { aux = 3*vex/(powf(n,2)*powf(X,n)*w); if (n%2 == 0) { aux = -aux; } sum +=aux; } result-=sum; return result; } /** * Calcular coeficiente H do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de H * @param X Chi - Varivel fsica Chi a ser calculado o valor de H * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de H * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de H * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de H * @returns O coeficinte H dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_H (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double 
result = 0; double sum = 0; int n; double aux; result = z0; //Calculo do somatorio for (n = 1; n <= N; n++) { aux = ((vez*Y)/(powf(X,n)*powf(w,2)))/(1+powf((n*Y)/w,2)); if (n%2 == 0) { aux = -aux; } sum += aux; } result += sum; return result; } /** * Calcular coeficiente I do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de I * @param X Chi - Varivel fsica Chi a ser calculado o valor de I * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de I * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de I * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de I * @returns O coeficinte I dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_I (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; double sum = 0; int n; double aux; result = zl0/w - (vez/w)*(log((X+1)/X)); //Calculo do somatorio for (n = 1; n <= N; n++) { aux = ((vez)/(powf(n,2)*powf(X,n)*w))/(1+powf((n*Y)/w,2)); if (n%2 == 0) { aux = -aux; } sum += aux; } result += sum; return result; } /** * Calcular o somatrio 
dos coeficientes Jn do Rendezvous * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado o valor de Jn * @param X Chi - Varivel fsica Chi a ser calculado o valor de Jn * @param w * @param a - 0 - Potncia utilizada dentro do somtorio para casos em que o indice do somtorio utilizado elevado a potncias diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado o valor de Jn * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado o valor de Jn * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado o valor de Jn * @returns O somatrio coeficinte Jn dado os valores iniciais e as variveis fsicas a serem testadas */ double __device__ brute_J(int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez){ double result = 0; double sum = 0; int n; double aux; for (n = 1; n <= N; n++) { aux = vez/(n*powf(X,n)*w)/(1+powf((n*Y)/w,2)); if (n%2 == 0) { aux = - aux; } aux *= powf(n,a); sum += aux; } result = sum; return result; } /** * Calcular coeficientes A sufixados de 1 a 12 da equao de Rendezvous * @param A Ponteiro para o array que ser modificado e ao fim da execuo conter os valores de A sufixados em cada um de seus indices de 1 12 * @param N Nmero de iteraes no somatrio interno * @param x0 valor no eixo X da posio relativa inicial entre o 
satlite e o detrito * @param y0 valor no eixo Y da posio relativa inicial entre o satlite e o detrito * @param z0 valor no eixo z da posio relativa inicial entre o satlite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satlite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satlite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satlite e o detrito * @param Y Gama - Varivel fsica Gama a ser calculado os coeficientes A sufixados * @param X Chi - Varivel fsica Chi a ser calculado os coeficientes A sufixados * @param w * @param vex Varivel fsica da Velocidade de exausto no eixo X a ser calculado os coeficientes A sufixados * @param vey Varivel fsica da Velocidade de exausto no eixo Y a ser calculado os coeficientes A sufixados * @param vez Varivel fsica da Velocidade de exausto no eixo Z a ser calculado os coeficientes A sufixados */ void __device__ brute_all(double *A, int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, int X, double w, double vex, double vey, double vez){ double a, B, D, e, G, H, I; //Calculando valores de A, B, C, D, E, F, G, H, I e J a = brute_A(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); B = brute_B(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); D = brute_D(N,x0,y0,z0,xl0,yl0,zl0,Y, X, w, 0, vex, vey,vez); e = brute_E(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); G = brute_G(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); H = brute_H(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); I = brute_I(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); __syncthreads(); //Calculando inicialmente a soma (A1 + A3 + A5) A[1] = 2*a*w + e - Y*brute_F(N,x0,y0,z0, xl0,yl0, zl0, Y, X, w, 1, vex, vey,vez); A[3] = B*w - Y*brute_C(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 1, vex, vey,vez); A[5] = I*w +Y*brute_J(N,x0,y0,z0,xl0, yl0, zl0, Y, X, w, 1, vex, vey,vez); //Calculando inicialmente a soma (A2 + A4 + A6) A[2] = G - 2*B + 
brute_F(N,x0,y0,z0,xl0,yl0, zl0, Y, X, w, 0, vex, vey,vez); A[4] = a +D + brute_C(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); A[6] = H - brute_J(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); //Calculando inicialmente a soma (A7 + A9 + A11) A[7] = 2*B*w*w + Y*Y*brute_F(N,x0,y0,z0, xl0, yl0, zl0, Y, X, w, 2, vex, vey,vez); A[9] = Y*Y*brute_C(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 2, vex, vey,vez) - a*w*w; A[11] = -w*w*H - Y*Y*brute_J(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 2, vex, vey,vez); //Calculando inicialmente a soma (A8 + A10 + A12) A8=A1 // A10 = A3 // A12 = A5 A[8] = A[1]; A[10] = A[3]; A[12] = A[5]; } double __device__ calcularDiferenca (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double gama, double chi, double w, double vex, double vey, double vez){ double A[13]; double a,b,c,d; double a1,a2,a3,a4,a5,a6; double result; brute_all(A, N, x0, y0, z0, xl0, yl0, zl0, gama, chi, w, vex, vey, vez); //a1 = 1/(A1 + A3 + A5) //a2 = (A2 + A4 + A6) //a3 = 1/(A7 + A9 + A11) //a4 = (A8 + A10 + A12) //a5 = (A1*A2 + A3*A4 + A5*A6) //a6 = (A7*A8 + A9*A10 + A11*A12) a1 = 1/(A[1]*A[1]+A[3]*A[3]+A[5]*A[5]); a2 = A[2]*A[2]+A[4]*A[4]+A[6]*A[6]; a3 = 1/(A[7]*A[7]+A[9]*A[9]+A[11]*A[11]); a4 = A[8]*A[8]+A[10]*A[10]+A[12]*A[12]; a5 = A[1]*A[2] + A[3]*A[4] + A[5]*A[6]; a6 = A[7]*A[8] + A[9]*A[10] + A[11]*A[12]; a = a5*a1; b = a1*a2; c = a6*a3; d = a3*a4; //Result equivale a diferena entre os dois lados da igualdade que descrevem o Rendezvous //Equao do Rendezvous (b-d) = 4(a-c)(bc-ad) result = powf(b-d,2)-4*(a-c)*(b*c-a*d); return result; } void __global__ calcularRendezvousDevice(double *d_variables){ double d_x0 = d_variables[0]; double d_y0 = d_variables[1]; double d_z0 = d_variables[2]; double d_xl0 = d_variables[3]; double d_yl0 = d_variables[4]; double d_zl0 = d_variables[5]; double d_w = d_variables[6]; double gama = blockIdx.x; //Y(Gama) recebe o valor x atual do bloco; double chi = blockIdx.y; //X(Chi) recebe o valor y atual do bloco; double 
ve = threadIdx.x; //ve||vex(Velocidade de exausto) recebe o valor x atual da thread; //Converso de indexs para valores reais chi++; gama = gama-14; gama = powf(10,gama); ve++; ve = ve/10; double yInicial = calcularDiferenca(N_PRECISION, d_x0, d_y0, d_z0, d_xl0, d_yl0, d_zl0, gama, chi, d_w, ve, ve, ve); cuPrintf("%.14lf , %lf , %lf , %lf\n",gama, chi, ve, yInicial); } void calcularRendezvous(double x0, double y0, double z0, double xl0, double yl0, double zl0, double w) { int tx = 17; //Nmero de iteraes em Gama (de -14 a 2 com passo 1) int ty = 100; //Nmero de iteraes em Chi (de 1 a 100 com passo 1) int tz = 100; //Nmero de iteraes em ve (de 0.1 a 10 com passo 0.1) dim3 numBlocks(tx,ty); size_t size = 7*sizeof(double); int threadsPerBlock = tz; double *h_variables = (double *)malloc(7*sizeof(double)); h_variables[0] = x0; h_variables[1] = y0; h_variables[2] = z0; h_variables[3] = xl0; h_variables[4] = yl0; h_variables[5] = zl0; h_variables[6] = w; //Alocando a memria das variveis no Device double *d_variables; hipMalloc((void **) &d_variables, size); //Copiando as variveis do host para o Device hipMemcpy(d_variables, h_variables, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( calcularRendezvousDevice), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_variables); } int main(int argc, char **argv){ //Declarao das variaveis double x0,y0,z0; //Componentes das posies relativas double xl0,yl0,zl0; //Componentes das velocidades relativas double w; //Raio (Constante) double raio; //raio (Constante) double r0; //Altitude <-----Verificar -----> //Variaveis inutilizadas (Por enquanto) double tempo, alpha, beta, vi, xf, yf, zf, rf, dxf, dyf, vf; if(argc == 1){ printf("Passe o nome dos arquivos de input como parmetro\n"); return 1; } //Aumentando o tamanho do Buffer usado para transferir os dados internos do Device para o Host // Informao de cuPrintf.cuh // "bufferLen=1048576 1-meg - that's enough for 4096 printfs by all threads put together" size_t size = 43520000; //Cada 
printf necessita de 256 bits 256*100*100*17 hipDeviceSetLimit(hipLimitPrintfFifoSize, size); for(int i = 1; i < argc; i++){ //Tentativa de leitura de cada um dos arquivos passados como parmetro /*Leitura do Arquivo*/ char * nomeDoArquivo = argv[i]; FILE *file; file = fopen(nomeDoArquivo, "r"); int b = 0; if (file == NULL) { //Verifica se o caminho existe break; } else { while((fscanf(file,"%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf", &tempo, &alpha, &beta, &x0, &y0, &z0, &r0, &xl0, &yl0, &zl0, &vi, &xf, &yf, &zf, &rf, &dxf, &dyf, &zf, &vf)) != EOF){ //Enquanto no for o fim do arquivo cudaPrintfInit(size); char nomeDoArquivoDeEscrita[256]; sprintf( nomeDoArquivoDeEscrita, "%d-output-%d.csv", i, b); FILE *fileToWrite; fileToWrite = fopen(nomeDoArquivoDeEscrita, "w"); // Tempo Alpha Beta X0 y0 z0 r0 xl0 yl0 zl0 |Vi| xf yf zf rf dxf dyf dzf |Vf| // 456.000000 104 89 -0.725655 2.910444 0.052357 3.000000 0.005108 -0.006719 -0.000104 0.008441 0.000000 0.000000 0.000000 0.000000 -0.001749 -0.005737 -0.000121 0.005999 raio = EARTH_RADIUS + r0; w = sqrt(MI/(raio*raio*raio)); calcularRendezvous(x0,y0,z0,xl0,yl0,zl0,w); // Forando Flush do buffer do cuPrintf e armazenando em arquivo // O segundo parmetro setado para true faz com que os indices do bloco e da thread que chamou o printf sejam exibidos cudaPrintfDisplay(fileToWrite,false); cudaPrintfEnd(); b++; } } } }
3756b79fe572baaa6cd09f05e5e6a8a75294fae9.cu
/** CUDA, rendezvousParalel.cu Purpose: Calcular as variáveis físicas de um veículo espacial capazes de tornar o Rendezvous possível @author Cássio Santos @version 1.0 31/07/17 */ /** Para compilaçao: ex.: nvcc rendezvousParalel.cu -o rendezvous Para execução: ex.: ./rendezvous v-0.005-0.006.dat v-0.007-0.008.dat */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "cuPrintf.cu" //Declaração de constantes utilizadas para o calculo dos coeficientes #define MI 398600.4418 #define EARTH_RADIUS 6378.0 #define N_PRECISION 10 using namespace std; /** * Calcular coeficiente A do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de A * @param X Chi - Variável física Chi a ser calculado o valor de A * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de A * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de A * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de A * @returns O coeficiênte A dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_A (int N, double x0, double y0, double 
z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; int n; double aux; double sum = 0; result += (2*xl0)/w - 3*y0 +((2*vex)/w)*log((X+1)/X); //Calculo do somatorio for (n = 1; n <= N; n++) { aux = (1/(n*powf(X, n)))*(1/(1+powf(((n*Y)/w),2)))*(((2*vex)/w)+((n*Y*vey)/(w*w))); if (n%2 == 0) { sum -= aux; } else { sum += aux; } } result-= sum; return result; } /** * Calcular coeficiente B do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de B * @param X Chi - Variável física Chi a ser calculado o valor de B * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de B * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de B * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de B * @returns O coeficiênte B dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_B (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez){ double result = 0; double sum = 0; int n; double aux; result += yl0/w + 
(vey/w)*log((X+1)/X); //Calculo do somatorio for (n = 1; n <= N; n++) { aux = (1/(n*powf(X,n)))*(1/(1+powf(((n*Y)/w),2)))*(vey/w + (2*n*Y*vex)/(w*w)); if (n%2 == 0) {//iteração Par aux = -aux; } sum += aux; } result += sum; return result; } /** * Calcular o somatório dos coeficientes Cn do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de C * @param X Chi - Variável física Chi a ser calculado o valor de C * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de C * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de C * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de C * @returns O somatório dos coeficiêntes Cn dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_C (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez){ double result = 0; int n; double aux; //Calculo do somatorio Cn for (n = 1; n <= N; n++) { aux = powf(n,a)*vex/(n*powf(X,n)*(1+powf((n*Y/w),2))) + n*Y*powf(n,a)*vey/(n*powf(X,n)*powf(w,2)*(1+powf((n*Y/w),2))); if (n%2 == 0) { aux = -aux; } 
result +=aux; } return result; } /** * Calcular coeficiente D do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de D * @param X Chi - Variável física Chi a ser calculado o valor de D * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de D * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de D * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de D * @returns O coeficiênte D dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_D (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; result -= (2*vex* log((X+1)/X))/w; result += 4*y0 - 2*xl0/w; return result; } /** * Calcular coeficiente E do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da 
velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de E * @param X Chi - Variável física Chi a ser calculado o valor de E * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de E * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de E * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de E * @returns O coeficiênte E dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_E (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; result -= 3*vex*log((X+1)/X); result += 6*w*y0 - 3*xl0; return result; } /** * Calcular o somatório dos coeficientes Fn do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de Fn * @param X Chi - Variável física Chi a ser calculado o valor de Fn * @param w * @param a - 0 - 
Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de Fn * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de Fn * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de Fn * @returns O somatório coeficiênte Fn dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_F(int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; double sum = 0; int n; double aux; //Calculo do somatorio for (n = 1; n <= N; n++) { aux = (1/(n*powf(X,n)))*((2*vey)/w + (4*vex)/(n*Y))/((1+powf((n*Y)/w,2))); if (n%2 == 0) { aux = - aux; } aux -= vex/(n*Y); aux *= powf(n,a); sum += aux; } result = sum; return result; } /** * Calcular coeficiente G do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de G * @param X Chi - Variável física Chi a ser calculado o valor de G * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no 
eixo X a ser calculado o valor de G * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de G * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de G * @returns O coeficiênte G dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_G (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; double sum = 0; int n; double aux; result= 2*yl0/w + x0 + (2*vey*(log((X+1)/X)))/w; //Calculo do somatorio for (n = 1; n <= N; n++) { aux = 3*vex/(powf(n,2)*powf(X,n)*w); if (n%2 == 0) { aux = -aux; } sum +=aux; } result-=sum; return result; } /** * Calcular coeficiente H do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de H * @param X Chi - Variável física Chi a ser calculado o valor de H * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de H * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de H * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de H * @returns O coeficiênte H 
dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_H (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 0; double sum = 0; int n; double aux; result = z0; //Calculo do somatorio for (n = 1; n <= N; n++) { aux = ((vez*Y)/(powf(X,n)*powf(w,2)))/(1+powf((n*Y)/w,2)); if (n%2 == 0) { aux = -aux; } sum += aux; } result += sum; return result; } /** * Calcular coeficiente I do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de I * @param X Chi - Variável física Chi a ser calculado o valor de I * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de I * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de I * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de I * @returns O coeficiênte I dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_I (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez) { double result = 
0; double sum = 0; int n; double aux; result = zl0/w - (vez/w)*(log((X+1)/X)); //Calculo do somatorio for (n = 1; n <= N; n++) { aux = ((vez)/(powf(n,2)*powf(X,n)*w))/(1+powf((n*Y)/w,2)); if (n%2 == 0) { aux = -aux; } sum += aux; } result += sum; return result; } /** * Calcular o somatório dos coeficientes Jn do Rendezvous * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado o valor de Jn * @param X Chi - Variável física Chi a ser calculado o valor de Jn * @param w * @param a - 0 - Potência utilizada dentro do somátorio para casos em que o indice do somátorio é utilizado elevado a potências diferentes * a = 1 -> n^1 * a = 2 -> n^2 * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado o valor de Jn * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado o valor de Jn * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado o valor de Jn * @returns O somatório coeficiênte Jn dado os valores iniciais e as variáveis físicas a serem testadas */ double __device__ brute_J(int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, double X, double w, int a, double vex, double vey, double vez){ double result = 0; double sum = 0; int n; double aux; for (n = 1; n <= N; n++) { aux = vez/(n*powf(X,n)*w)/(1+powf((n*Y)/w,2)); if (n%2 == 0) { aux = - aux; } aux *= powf(n,a); sum += aux; } result = sum; return 
result; } /** * Calcular coeficientes A sufixados de 1 a 12 da equação de Rendezvous * @param A Ponteiro para o array que será modificado e ao fim da execução conterá os valores de A sufixados em cada um de seus indices de 1 à 12 * @param N Número de iterações no somatório interno * @param x0 valor no eixo X da posição relativa inicial entre o satélite e o detrito * @param y0 valor no eixo Y da posição relativa inicial entre o satélite e o detrito * @param z0 valor no eixo z da posição relativa inicial entre o satélite e o detrito * @param xl0 valor no eixo x da velocidade relativa inicial entre o satélite e o detrito * @param yl0 valor no eixo y da velocidade relativa inicial entre o satélite e o detrito * @param zl0 valor no eixo z da velocidade relativa inicial entre o satélite e o detrito * @param Y Gama - Variável física Gama a ser calculado os coeficientes A sufixados * @param X Chi - Variável física Chi a ser calculado os coeficientes A sufixados * @param w * @param vex Variável física da Velocidade de exaustão no eixo X a ser calculado os coeficientes A sufixados * @param vey Variável física da Velocidade de exaustão no eixo Y a ser calculado os coeficientes A sufixados * @param vez Variável física da Velocidade de exaustão no eixo Z a ser calculado os coeficientes A sufixados */ void __device__ brute_all(double *A, int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double Y, int X, double w, double vex, double vey, double vez){ double a, B, D, e, G, H, I; //Calculando valores de A, B, C, D, E, F, G, H, I e J a = brute_A(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); B = brute_B(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); D = brute_D(N,x0,y0,z0,xl0,yl0,zl0,Y, X, w, 0, vex, vey,vez); e = brute_E(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); G = brute_G(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); H = brute_H(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); I = brute_I(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, 
vey,vez); __syncthreads(); //Calculando inicialmente a soma (A1² + A3² + A5²) A[1] = 2*a*w + e - Y*brute_F(N,x0,y0,z0, xl0,yl0, zl0, Y, X, w, 1, vex, vey,vez); A[3] = B*w - Y*brute_C(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 1, vex, vey,vez); A[5] = I*w +Y*brute_J(N,x0,y0,z0,xl0, yl0, zl0, Y, X, w, 1, vex, vey,vez); //Calculando inicialmente a soma (A2² + A4² + A6²) A[2] = G - 2*B + brute_F(N,x0,y0,z0,xl0,yl0, zl0, Y, X, w, 0, vex, vey,vez); A[4] = a +D + brute_C(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); A[6] = H - brute_J(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 0, vex, vey,vez); //Calculando inicialmente a soma (A7² + A9² + A11²) A[7] = 2*B*w*w + Y*Y*brute_F(N,x0,y0,z0, xl0, yl0, zl0, Y, X, w, 2, vex, vey,vez); A[9] = Y*Y*brute_C(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 2, vex, vey,vez) - a*w*w; A[11] = -w*w*H - Y*Y*brute_J(N,x0,y0,z0,xl0,yl0,zl0, Y, X, w, 2, vex, vey,vez); //Calculando inicialmente a soma (A8² + A10² + A12²) A8=A1 // A10 = A3 // A12 = A5 A[8] = A[1]; A[10] = A[3]; A[12] = A[5]; } double __device__ calcularDiferenca (int N, double x0, double y0, double z0, double xl0, double yl0, double zl0, double gama, double chi, double w, double vex, double vey, double vez){ double A[13]; double a,b,c,d; double a1,a2,a3,a4,a5,a6; double result; brute_all(A, N, x0, y0, z0, xl0, yl0, zl0, gama, chi, w, vex, vey, vez); //a1 = 1/(A1² + A3² + A5²) //a2 = (A2² + A4² + A6²) //a3 = 1/(A7² + A9² + A11²) //a4 = (A8² + A10² + A12²) //a5 = (A1*A2 + A3*A4 + A5*A6) //a6 = (A7*A8 + A9*A10 + A11*A12) a1 = 1/(A[1]*A[1]+A[3]*A[3]+A[5]*A[5]); a2 = A[2]*A[2]+A[4]*A[4]+A[6]*A[6]; a3 = 1/(A[7]*A[7]+A[9]*A[9]+A[11]*A[11]); a4 = A[8]*A[8]+A[10]*A[10]+A[12]*A[12]; a5 = A[1]*A[2] + A[3]*A[4] + A[5]*A[6]; a6 = A[7]*A[8] + A[9]*A[10] + A[11]*A[12]; a = a5*a1; b = a1*a2; c = a6*a3; d = a3*a4; //Result equivale a diferença entre os dois lados da igualdade que descrevem o Rendezvous //Equação do Rendezvous (b-d)² = 4(a-c)(bc-ad) result = powf(b-d,2)-4*(a-c)*(b*c-a*d); return result; } void __global__ 
calcularRendezvousDevice(double *d_variables){ double d_x0 = d_variables[0]; double d_y0 = d_variables[1]; double d_z0 = d_variables[2]; double d_xl0 = d_variables[3]; double d_yl0 = d_variables[4]; double d_zl0 = d_variables[5]; double d_w = d_variables[6]; double gama = blockIdx.x; //Y(Gama) recebe o valor x atual do bloco; double chi = blockIdx.y; //X(Chi) recebe o valor y atual do bloco; double ve = threadIdx.x; //ve||vex(Velocidade de exaustão) recebe o valor x atual da thread; //Conversão de indexs para valores reais chi++; gama = gama-14; gama = powf(10,gama); ve++; ve = ve/10; double yInicial = calcularDiferenca(N_PRECISION, d_x0, d_y0, d_z0, d_xl0, d_yl0, d_zl0, gama, chi, d_w, ve, ve, ve); cuPrintf("%.14lf , %lf , %lf , %lf\n",gama, chi, ve, yInicial); } void calcularRendezvous(double x0, double y0, double z0, double xl0, double yl0, double zl0, double w) { int tx = 17; //Número de iterações em Gama (de -14 a 2 com passo 1) int ty = 100; //Número de iterações em Chi (de 1 a 100 com passo 1) int tz = 100; //Número de iterações em ve (de 0.1 a 10 com passo 0.1) dim3 numBlocks(tx,ty); size_t size = 7*sizeof(double); int threadsPerBlock = tz; double *h_variables = (double *)malloc(7*sizeof(double)); h_variables[0] = x0; h_variables[1] = y0; h_variables[2] = z0; h_variables[3] = xl0; h_variables[4] = yl0; h_variables[5] = zl0; h_variables[6] = w; //Alocando a memória das variáveis no Device double *d_variables; cudaMalloc((void **) &d_variables, size); //Copiando as variáveis do host para o Device cudaMemcpy(d_variables, h_variables, size, cudaMemcpyHostToDevice); calcularRendezvousDevice<<<numBlocks, threadsPerBlock>>>(d_variables); } int main(int argc, char **argv){ //Declaração das variaveis double x0,y0,z0; //Componentes das posições relativas double xl0,yl0,zl0; //Componentes das velocidades relativas double w; //Raio (Constante) double raio; //raio (Constante) double r0; //Altitude <-----Verificar -----> //Variaveis inutilizadas (Por enquanto) double 
tempo, alpha, beta, vi, xf, yf, zf, rf, dxf, dyf, vf; if(argc == 1){ printf("Passe o nome dos arquivos de input como parâmetro\n"); return 1; } //Aumentando o tamanho do Buffer usado para transferir os dados internos do Device para o Host // Informação de cuPrintf.cuh // "bufferLen=1048576 1-meg - that's enough for 4096 printfs by all threads put together" size_t size = 43520000; //Cada printf necessita de 256 bits 256*100*100*17 cudaDeviceSetLimit(cudaLimitPrintfFifoSize, size); for(int i = 1; i < argc; i++){ //Tentativa de leitura de cada um dos arquivos passados como parâmetro /*Leitura do Arquivo*/ char * nomeDoArquivo = argv[i]; FILE *file; file = fopen(nomeDoArquivo, "r"); int b = 0; if (file == NULL) { //Verifica se o caminho existe break; } else { while((fscanf(file,"%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf", &tempo, &alpha, &beta, &x0, &y0, &z0, &r0, &xl0, &yl0, &zl0, &vi, &xf, &yf, &zf, &rf, &dxf, &dyf, &zf, &vf)) != EOF){ //Enquanto não for o fim do arquivo cudaPrintfInit(size); char nomeDoArquivoDeEscrita[256]; sprintf( nomeDoArquivoDeEscrita, "%d-output-%d.csv", i, b); FILE *fileToWrite; fileToWrite = fopen(nomeDoArquivoDeEscrita, "w"); // Tempo Alpha Beta X0 y0 z0 r0 xl0 yl0 zl0 |Vi| xf yf zf rf dxf dyf dzf |Vf| // 456.000000 104 89 -0.725655 2.910444 0.052357 3.000000 0.005108 -0.006719 -0.000104 0.008441 0.000000 0.000000 0.000000 0.000000 -0.001749 -0.005737 -0.000121 0.005999 raio = EARTH_RADIUS + r0; w = sqrt(MI/(raio*raio*raio)); calcularRendezvous(x0,y0,z0,xl0,yl0,zl0,w); // Forçando Flush do buffer do cuPrintf e armazenando em arquivo // O segundo parâmetro setado para true faz com que os indices do bloco e da thread que chamou o printf sejam exibidos cudaPrintfDisplay(fileToWrite,false); cudaPrintfEnd(); b++; } } } }
fd7a436e74d9f359f20326e7a19cfb7f1ea4d9b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017, The OctNet authors // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <iostream> #include <cstring> #include <chrono> #include <limits> #include <smmintrin.h> #include "octnet/gpu/gpu.h" #include "octnet/gpu/common.h" #include "octnet/cpu/cpu.h" #include "octnet/test/objects.h" // #ifdef __CUDA_ARCH__ // __host__ __device__ // #endif // OCTREE_FUNCTION // inline int tree_data_idx2(const ot_tree_t* tree, int bit_idx, ot_size_t feature_size) { // // int pa_idx = IMAX(0, tree_parent_bit_idx(bit_idx)); // // if(!tree_isset_bit(tree, pa_idx)) { // // bit_idx = pa_idx; // // pa_idx = tree_parent_bit_idx(pa_idx); // // } // // if(!tree_isset_bit(tree, pa_idx)) { // // bit_idx = pa_idx; // // pa_idx = tree_parent_bit_idx(pa_idx); // // } // // if(!tree_isset_bit(tree, pa_idx)) { // // return 0; // // } // int pa_idx = tree_parent_bit_idx(bit_idx); // int papa_idx = tree_parent_bit_idx(pa_idx); // int papapa_idx = tree_parent_bit_idx(papa_idx); // bit_idx = IMAX(bit_idx * tree_isset_bit(tree, pa_idx), IMAX(pa_idx * tree_isset_bit(tree, papa_idx), papa_idx * tree_isset_bit(tree, papapa_idx))); // if(bit_idx == 0) { // return 0; // } // pa_idx = IMAX(0, tree_parent_bit_idx(bit_idx)); // // int data_idx = tree_cnt0(tree, 0, IMIN(bit_idx, 73)); // // if(pa_idx > 1) { // // data_idx -= 8 * tree_cnt0(tree, 1, pa_idx); // // } // // if(bit_idx > 72) { // // data_idx += bit_idx - 73; // // } // int data_idx = tree_cnt1(tree, 0, pa_idx); // data_idx = data_idx * 8 + 1 // + (bit_idx-1)%8 // - (data_idx + tree_cnt1(tree, pa_idx, bit_idx)); // return data_idx * feature_size; // } // void correctness_cpu(ot_tree_t* tree) { // std::cout << "---------------------- test correctness cpu --------------------" << std::endl; // std::cout << tree_bit_str(tree) << std::endl; // for(int bit_idx = 0; bit_idx < (1+8+64+64*8); ++bit_idx) { // int di_gt = tree_data_idx(tree, bit_idx, 1); // int di2 = tree_data_idx2(tree, bit_idx, 1); // if(!tree_isset_bit(tree, bit_idx) && di2 != di_gt) { // std::cout << "[ERROR_CPU] bit_idx=" << bit_idx << ": " << di2 << " 
should be " << di_gt << std::endl; // } // } // } // void speed_cpu(ot_tree_t* tree) { // int reps = 100000; // int di = 0; // auto t1 = std::chrono::high_resolution_clock::now(); // for(int rep = 0; rep < reps; ++rep) { // for(int bit_idx = 0; bit_idx < (1+8+64+64*8); ++bit_idx) { // int tmp = tree_data_idx(tree, bit_idx, 1); // di += tmp; // } // } // auto t2 = std::chrono::high_resolution_clock::now(); // auto time_span = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1); // std::cout << "cpu old took " << time_span.count() << "[s]" << std::endl; // std::cout << di << std::endl; // di = 0; // t1 = std::chrono::high_resolution_clock::now(); // for(int rep = 0; rep < reps; ++rep) { // for(int bit_idx = 0; bit_idx < (1+8+64+64*8); ++bit_idx) { // int tmp = tree_data_idx2(tree, bit_idx, 1); // di += tmp; // } // } // t2 = std::chrono::high_resolution_clock::now(); // time_span = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1); // std::cout << "cpu new took " << time_span.count() << "[s]" << std::endl; // std::cout << di << std::endl; // } // __global__ void kernel_correctness(const ot_tree_t* tree, int n_bit_ind) { // CUDA_KERNEL_LOOP(bit_idx, n_bit_ind) { // int di_gt = tree_data_idx(tree, bit_idx, 1); // int di2 = tree_data_idx2(tree, bit_idx, 1); // if(!tree_isset_bit(tree, bit_idx) && di2 != di_gt) { // printf("[ERROR_GPU] bit_idx=%d: %d should be %d\n", bit_idx, di2, di_gt); // } // } // } // void correctness_gpu(ot_tree_t* tree_h) { // std::cout << "---------------------- test correctness gpu --------------------" << std::endl; // std::cout << tree_bit_str(tree_h) << std::endl; // ot_tree_t* tree_d = host_to_device_malloc(tree_h, N_TREE_INTS); // int n_bit_ind = 1+8+64+64*8; // kernel_correctness<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>( // tree_d, n_bit_ind // ); // CUDA_POST_KERNEL_CHECK; // device_free(tree_d); // } // __global__ void kernel_speed1(const ot_tree_t* tree, int n_bit_ind) { // CUDA_KERNEL_LOOP(bit_idx, 
n_bit_ind) { // int di = tree_data_idx(tree, bit_idx, 1); // if(di > 1000000 ) { // printf("[ERROR_GPU] you summoned an evil demon\n"); // } // } // } // __global__ void kernel_speed2(const ot_tree_t* tree, int n_bit_ind) { // CUDA_KERNEL_LOOP(bit_idx, n_bit_ind) { // int di = tree_data_idx2(tree, bit_idx, 1); // if(di > 1000000 ) { // printf("[ERROR_GPU] you summoned an evil demon\n"); // } // } // } // void speed_gpu(ot_tree_t* tree_h) { // int reps = 100000; // int n_bit_ind = 1+8+64+64*8; // ot_tree_t* tree_d = host_to_device_malloc(tree_h, N_TREE_INTS); // kernel_speed1<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>(tree_d, n_bit_ind); // auto t1 = std::chrono::high_resolution_clock::now(); // for(int rep = 0; rep < reps; ++rep) { // kernel_speed1<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>(tree_d, n_bit_ind); // CUDA_POST_KERNEL_CHECK; // } // auto t2 = std::chrono::high_resolution_clock::now(); // auto time_span = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1); // std::cout << "gpu old took " << time_span.count() << "[s]" << std::endl; // kernel_speed2<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>(tree_d, n_bit_ind); // t1 = std::chrono::high_resolution_clock::now(); // for(int rep = 0; rep < reps; ++rep) { // kernel_speed2<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>(tree_d, n_bit_ind); // CUDA_POST_KERNEL_CHECK; // } // t2 = std::chrono::high_resolution_clock::now(); // time_span = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1); // std::cout << "gpu new took " << time_span.count() << "[s]" << std::endl; // device_free(tree_d); // } int main(int argc, char** argv) { // std::cout << "[IMPROVE] tree_data_idx" << std::endl; // ot_tree_t* tree = new ot_tree_t[N_TREE_INTS]; // //test set 1 // memset(tree, 0, N_TREE_INTS * sizeof(ot_tree_t)); // correctness_cpu(tree); // correctness_gpu(tree); // speed_cpu(tree); // speed_gpu(tree); // //test set 1 // memset(tree, 0, N_TREE_INTS * sizeof(ot_tree_t)); // tree_set_bit(tree, 
0); // tree_set_bit(tree, 1); // tree_set_bit(tree, 2); // tree_set_bit(tree, 9); // tree_set_bit(tree, 10); // tree_set_bit(tree, 18); // correctness_cpu(tree); // correctness_gpu(tree); // speed_cpu(tree); // speed_gpu(tree); // //test set 2 // memset(tree, 0, N_TREE_INTS * sizeof(ot_tree_t)); // tree_set_bit(tree, 0); // tree_set_bit(tree, 1); // tree_set_bit(tree, 9); // tree_set_bit(tree, 10); // tree_set_bit(tree, 11); // tree_set_bit(tree, 4); // tree_set_bit(tree, 5); // tree_set_bit(tree, 8); // tree_set_bit(tree, 65); // tree_set_bit(tree, 66); // tree_set_bit(tree, 72); // correctness_cpu(tree); // correctness_gpu(tree); // speed_cpu(tree); // speed_gpu(tree); // //test set 3 // memset(tree, 0, N_TREE_INTS * sizeof(ot_tree_t)); // for(int idx = 0; idx < 73; ++idx) { tree_set_bit(tree, idx); } // correctness_cpu(tree); // correctness_gpu(tree); // speed_cpu(tree); // speed_gpu(tree); // delete[] tree; // std::cout << "[DONE]" << std::endl; return 0; }
fd7a436e74d9f359f20326e7a19cfb7f1ea4d9b9.cu
// Copyright (c) 2017, The OctNet authors // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <iostream> #include <cstring> #include <chrono> #include <limits> #include <smmintrin.h> #include "octnet/gpu/gpu.h" #include "octnet/gpu/common.h" #include "octnet/cpu/cpu.h" #include "octnet/test/objects.h" // #ifdef __CUDA_ARCH__ // __host__ __device__ // #endif // OCTREE_FUNCTION // inline int tree_data_idx2(const ot_tree_t* tree, int bit_idx, ot_size_t feature_size) { // // int pa_idx = IMAX(0, tree_parent_bit_idx(bit_idx)); // // if(!tree_isset_bit(tree, pa_idx)) { // // bit_idx = pa_idx; // // pa_idx = tree_parent_bit_idx(pa_idx); // // } // // if(!tree_isset_bit(tree, pa_idx)) { // // bit_idx = pa_idx; // // pa_idx = tree_parent_bit_idx(pa_idx); // // } // // if(!tree_isset_bit(tree, pa_idx)) { // // return 0; // // } // int pa_idx = tree_parent_bit_idx(bit_idx); // int papa_idx = tree_parent_bit_idx(pa_idx); // int papapa_idx = tree_parent_bit_idx(papa_idx); // bit_idx = IMAX(bit_idx * tree_isset_bit(tree, pa_idx), IMAX(pa_idx * tree_isset_bit(tree, papa_idx), papa_idx * tree_isset_bit(tree, papapa_idx))); // if(bit_idx == 0) { // return 0; // } // pa_idx = IMAX(0, tree_parent_bit_idx(bit_idx)); // // int data_idx = tree_cnt0(tree, 0, IMIN(bit_idx, 73)); // // if(pa_idx > 1) { // // data_idx -= 8 * tree_cnt0(tree, 1, pa_idx); // // } // // if(bit_idx > 72) { // // data_idx += bit_idx - 73; // // } // int data_idx = tree_cnt1(tree, 0, pa_idx); // data_idx = data_idx * 8 + 1 // + (bit_idx-1)%8 // - (data_idx + tree_cnt1(tree, pa_idx, bit_idx)); // return data_idx * feature_size; // } // void correctness_cpu(ot_tree_t* tree) { // std::cout << "---------------------- test correctness cpu --------------------" << std::endl; // std::cout << tree_bit_str(tree) << std::endl; // for(int bit_idx = 0; bit_idx < (1+8+64+64*8); ++bit_idx) { // int di_gt = tree_data_idx(tree, bit_idx, 1); // int di2 = tree_data_idx2(tree, bit_idx, 1); // if(!tree_isset_bit(tree, bit_idx) && di2 != di_gt) { // std::cout << "[ERROR_CPU] bit_idx=" << bit_idx << ": " << di2 << " 
should be " << di_gt << std::endl; // } // } // } // void speed_cpu(ot_tree_t* tree) { // int reps = 100000; // int di = 0; // auto t1 = std::chrono::high_resolution_clock::now(); // for(int rep = 0; rep < reps; ++rep) { // for(int bit_idx = 0; bit_idx < (1+8+64+64*8); ++bit_idx) { // int tmp = tree_data_idx(tree, bit_idx, 1); // di += tmp; // } // } // auto t2 = std::chrono::high_resolution_clock::now(); // auto time_span = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1); // std::cout << "cpu old took " << time_span.count() << "[s]" << std::endl; // std::cout << di << std::endl; // di = 0; // t1 = std::chrono::high_resolution_clock::now(); // for(int rep = 0; rep < reps; ++rep) { // for(int bit_idx = 0; bit_idx < (1+8+64+64*8); ++bit_idx) { // int tmp = tree_data_idx2(tree, bit_idx, 1); // di += tmp; // } // } // t2 = std::chrono::high_resolution_clock::now(); // time_span = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1); // std::cout << "cpu new took " << time_span.count() << "[s]" << std::endl; // std::cout << di << std::endl; // } // __global__ void kernel_correctness(const ot_tree_t* tree, int n_bit_ind) { // CUDA_KERNEL_LOOP(bit_idx, n_bit_ind) { // int di_gt = tree_data_idx(tree, bit_idx, 1); // int di2 = tree_data_idx2(tree, bit_idx, 1); // if(!tree_isset_bit(tree, bit_idx) && di2 != di_gt) { // printf("[ERROR_GPU] bit_idx=%d: %d should be %d\n", bit_idx, di2, di_gt); // } // } // } // void correctness_gpu(ot_tree_t* tree_h) { // std::cout << "---------------------- test correctness gpu --------------------" << std::endl; // std::cout << tree_bit_str(tree_h) << std::endl; // ot_tree_t* tree_d = host_to_device_malloc(tree_h, N_TREE_INTS); // int n_bit_ind = 1+8+64+64*8; // kernel_correctness<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>( // tree_d, n_bit_ind // ); // CUDA_POST_KERNEL_CHECK; // device_free(tree_d); // } // __global__ void kernel_speed1(const ot_tree_t* tree, int n_bit_ind) { // CUDA_KERNEL_LOOP(bit_idx, 
n_bit_ind) { // int di = tree_data_idx(tree, bit_idx, 1); // if(di > 1000000 ) { // printf("[ERROR_GPU] you summoned an evil demon\n"); // } // } // } // __global__ void kernel_speed2(const ot_tree_t* tree, int n_bit_ind) { // CUDA_KERNEL_LOOP(bit_idx, n_bit_ind) { // int di = tree_data_idx2(tree, bit_idx, 1); // if(di > 1000000 ) { // printf("[ERROR_GPU] you summoned an evil demon\n"); // } // } // } // void speed_gpu(ot_tree_t* tree_h) { // int reps = 100000; // int n_bit_ind = 1+8+64+64*8; // ot_tree_t* tree_d = host_to_device_malloc(tree_h, N_TREE_INTS); // kernel_speed1<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>(tree_d, n_bit_ind); // auto t1 = std::chrono::high_resolution_clock::now(); // for(int rep = 0; rep < reps; ++rep) { // kernel_speed1<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>(tree_d, n_bit_ind); // CUDA_POST_KERNEL_CHECK; // } // auto t2 = std::chrono::high_resolution_clock::now(); // auto time_span = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1); // std::cout << "gpu old took " << time_span.count() << "[s]" << std::endl; // kernel_speed2<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>(tree_d, n_bit_ind); // t1 = std::chrono::high_resolution_clock::now(); // for(int rep = 0; rep < reps; ++rep) { // kernel_speed2<<<GET_BLOCKS(n_bit_ind), CUDA_NUM_THREADS>>>(tree_d, n_bit_ind); // CUDA_POST_KERNEL_CHECK; // } // t2 = std::chrono::high_resolution_clock::now(); // time_span = std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1); // std::cout << "gpu new took " << time_span.count() << "[s]" << std::endl; // device_free(tree_d); // } int main(int argc, char** argv) { // std::cout << "[IMPROVE] tree_data_idx" << std::endl; // ot_tree_t* tree = new ot_tree_t[N_TREE_INTS]; // //test set 1 // memset(tree, 0, N_TREE_INTS * sizeof(ot_tree_t)); // correctness_cpu(tree); // correctness_gpu(tree); // speed_cpu(tree); // speed_gpu(tree); // //test set 1 // memset(tree, 0, N_TREE_INTS * sizeof(ot_tree_t)); // tree_set_bit(tree, 
0); // tree_set_bit(tree, 1); // tree_set_bit(tree, 2); // tree_set_bit(tree, 9); // tree_set_bit(tree, 10); // tree_set_bit(tree, 18); // correctness_cpu(tree); // correctness_gpu(tree); // speed_cpu(tree); // speed_gpu(tree); // //test set 2 // memset(tree, 0, N_TREE_INTS * sizeof(ot_tree_t)); // tree_set_bit(tree, 0); // tree_set_bit(tree, 1); // tree_set_bit(tree, 9); // tree_set_bit(tree, 10); // tree_set_bit(tree, 11); // tree_set_bit(tree, 4); // tree_set_bit(tree, 5); // tree_set_bit(tree, 8); // tree_set_bit(tree, 65); // tree_set_bit(tree, 66); // tree_set_bit(tree, 72); // correctness_cpu(tree); // correctness_gpu(tree); // speed_cpu(tree); // speed_gpu(tree); // //test set 3 // memset(tree, 0, N_TREE_INTS * sizeof(ot_tree_t)); // for(int idx = 0; idx < 73; ++idx) { tree_set_bit(tree, idx); } // correctness_cpu(tree); // correctness_gpu(tree); // speed_cpu(tree); // speed_gpu(tree); // delete[] tree; // std::cout << "[DONE]" << std::endl; return 0; }
5df1fa1bbec71f278eb92b3a7d081f826ab5bce8.hip
// !!! This is a file automatically generated by hipify!!! // ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2017, September 11 - October 9 // ### #include <hip/hip_runtime.h> #include <iostream> using namespace std; // cuda error checking #define CUDA_CHECK cuda_check(__FILE__,__LINE__) void cuda_check(string file, int line) { hipError_t e = hipGetLastError(); if (e != hipSuccess) { cout << endl << file << ", line " << line << ": " << hipGetErrorString(e) << " (" << e << ")" << endl; exit(1); } } // Add vector on GPU __global__ void vecAdd(float *d_a, float *d_b, float *d_c, int n){ for (int i = 0; i < n; i++){ d_c[i] = d_a[i] + d_b[i]; } } int main(int argc, char **argv) { // alloc and init input arrays on host (CPU) int n = 20; float *a = new float[n]; float *b = new float[n]; float *c = new float[n]; size_t nbytes = (size_t)(n)*sizeof(float); for(int i=0; i<n; i++) { a[i] = i; b[i] = (i%5)+1; c[i] = 0; } // CPU computation for(int i=0; i<n; i++) c[i] = a[i] + b[i]; // print result cout << "CPU:"<<endl; for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl; cout << endl; // init c for(int i=0; i<n; i++) c[i] = 0; //Memory allocation on GPU float *d_a = NULL; float *d_b = NULL; float *d_c = NULL; hipMalloc(&d_a, nbytes); hipMalloc(&d_b, nbytes); hipMalloc(&d_c, nbytes); hipMemcpy(d_a, a, nbytes, hipMemcpyHostToDevice); hipMemcpy(d_b, b, nbytes, hipMemcpyHostToDevice); hipMemcpy(d_c, c, nbytes, hipMemcpyHostToDevice); // Launch kernel dim3 block = dim3(128, 1, 1); dim3 grid = dim3((n+block.x-1)/block.x, 1, 1); // Add vectors hipLaunchKernelGGL(( vecAdd) , dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, n); // Copy back to host hipMemcpy(a, d_a, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK; hipMemcpy(b, d_b, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK; hipMemcpy(c, d_c, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK; // 
Free cuda memory hipFree(d_a); CUDA_CHECK; hipFree(d_b); CUDA_CHECK; hipFree(d_c); CUDA_CHECK; // print result cout << "GPU:"<<endl; for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl; cout << endl; // free CPU arrays delete[] a; delete[] b; delete[] c; }
5df1fa1bbec71f278eb92b3a7d081f826ab5bce8.cu
// ### // ### // ### Practical Course: GPU Programming in Computer Vision // ### // ### // ### Technical University Munich, Computer Vision Group // ### Summer Semester 2017, September 11 - October 9 // ### #include <cuda_runtime.h> #include <iostream> using namespace std; // cuda error checking #define CUDA_CHECK cuda_check(__FILE__,__LINE__) void cuda_check(string file, int line) { cudaError_t e = cudaGetLastError(); if (e != cudaSuccess) { cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl; exit(1); } } // Add vector on GPU __global__ void vecAdd(float *d_a, float *d_b, float *d_c, int n){ for (int i = 0; i < n; i++){ d_c[i] = d_a[i] + d_b[i]; } } int main(int argc, char **argv) { // alloc and init input arrays on host (CPU) int n = 20; float *a = new float[n]; float *b = new float[n]; float *c = new float[n]; size_t nbytes = (size_t)(n)*sizeof(float); for(int i=0; i<n; i++) { a[i] = i; b[i] = (i%5)+1; c[i] = 0; } // CPU computation for(int i=0; i<n; i++) c[i] = a[i] + b[i]; // print result cout << "CPU:"<<endl; for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl; cout << endl; // init c for(int i=0; i<n; i++) c[i] = 0; //Memory allocation on GPU float *d_a = NULL; float *d_b = NULL; float *d_c = NULL; cudaMalloc(&d_a, nbytes); cudaMalloc(&d_b, nbytes); cudaMalloc(&d_c, nbytes); cudaMemcpy(d_a, a, nbytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, nbytes, cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, nbytes, cudaMemcpyHostToDevice); // Launch kernel dim3 block = dim3(128, 1, 1); dim3 grid = dim3((n+block.x-1)/block.x, 1, 1); // Add vectors vecAdd <<<grid, block>>> (d_a, d_b, d_c, n); // Copy back to host cudaMemcpy(a, d_a, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK; cudaMemcpy(b, d_b, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK; cudaMemcpy(c, d_c, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK; // Free cuda memory cudaFree(d_a); CUDA_CHECK; cudaFree(d_b); CUDA_CHECK; 
cudaFree(d_c); CUDA_CHECK; // print result cout << "GPU:"<<endl; for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl; cout << endl; // free CPU arrays delete[] a; delete[] b; delete[] c; }
b11440bbdc464d622c025247053de2cea6b070b0.hip
// !!! This is a file automatically generated by hipify!!! // $ nvcc -c -std=c++11 --expt-extended-lambda -I../.. test.cu -o test.o #include <cassert> #include <thrust/device_vector.h> #include "daxpy.hpp" void test(size_t n) { // create resources hipStream_t stream; if(hipError_t error = hipStreamCreate(&stream)) { throw std::runtime_error("test: CUDA error after hipStreamCreate: " + std::string(hipGetErrorString(error))); } thrust::device_vector<double> x(n, 1); thrust::device_vector<double> y(n, 2); double a = 2; graph_executor ex(stream); any_sender task = daxpy(ex, n, a, x.data().get(), y.data().get()); task.submit(); task.sync_wait(); if(hipError_t error = hipStreamDestroy(stream)) { throw std::runtime_error("test: CUDA error after hipStreamDestroy: " + std::string(hipGetErrorString(error))); } thrust::device_vector<double> reference(n, 4); assert(reference == y); }
b11440bbdc464d622c025247053de2cea6b070b0.cu
// $ nvcc -c -std=c++11 --expt-extended-lambda -I../.. test.cu -o test.o #include <cassert> #include <thrust/device_vector.h> #include "daxpy.hpp" void test(size_t n) { // create resources cudaStream_t stream; if(cudaError_t error = cudaStreamCreate(&stream)) { throw std::runtime_error("test: CUDA error after cudaStreamCreate: " + std::string(cudaGetErrorString(error))); } thrust::device_vector<double> x(n, 1); thrust::device_vector<double> y(n, 2); double a = 2; graph_executor ex(stream); any_sender task = daxpy(ex, n, a, x.data().get(), y.data().get()); task.submit(); task.sync_wait(); if(cudaError_t error = cudaStreamDestroy(stream)) { throw std::runtime_error("test: CUDA error after cudaStreamDestroy: " + std::string(cudaGetErrorString(error))); } thrust::device_vector<double> reference(n, 4); assert(reference == y); }
12389cc0597d29c175029b8957db65799ab2fd36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/MultiLabelMarginCriterion.cu" #else // TODO: improve error messages void THNN_(MultiLabelMarginCriterion_updateOutput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *output, THCTensor *istarget, int64_t reduction) { input = THCTensor_(newContiguous)(state, input); target = THCIndexTensor_(newContiguous)(state, target); istarget = THCTensor_(newContiguous)(state, istarget); THCTensor_(resizeAs)(state, istarget, input); if(input->dim() == 1) { int dim = input->size(0); THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == dim), 3, "inconsistent target size"); THCTensor_(resize1d)(state, output, 1); dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), 1, dim, reduction == Reduction::Mean ); THCudaCheck(hipGetLastError()); } else if(input->dim() == 2) { int nframe = input->size(0); int dim = input->size(1); THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe) && (target->size(1) == dim), 3, "inconsistent target size"); dim3 blocks(input->size(0)); dim3 threads(MULTILABELMARGIN_THREADS); if (reduction != Reduction::None) { THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0)); THCTensor_(resize1d)(state, output, 1); hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, output_tmp), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), nframe, dim, 
reduction == Reduction::Mean ); THCudaCheck(hipGetLastError()); THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(THCTensor_(sumall)(state, output_tmp))); THCTensor_(free)(state, output_tmp); } else { THCTensor_(resize1d)(state, output, input->size(0)); hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), nframe, dim, false ); THCudaCheck(hipGetLastError()); } } else AT_ERROR("non-empty vector or matrix expected, got size: ", input->sizes()); THCTensor_(free)(state, input); THCIndexTensor_(free)(state, target); THCTensor_(free)(state, istarget); } void THNN_(MultiLabelMarginCriterion_updateGradInput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *istarget, int64_t reduction) { input = THCTensor_(newContiguous)(state, input); target = THCIndexTensor_(newContiguous)(state, target); istarget = THCTensor_(newContiguous)(state, istarget); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); if(gradInput->dim() == 1) { int dim = gradInput->size(0); THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == dim), 3, "inconsistent target size"); THArgCheck(!istarget->is_empty() && (istarget->dim() == 1) && (istarget->size(0) == dim), 3, "inconsistent isTarget size"); dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), 1, 
gradInput->size(0), reduction == Reduction::Mean, reduction != Reduction::None); } else if(gradInput->dim() == 2) { int nframe = gradInput->size(0); int dim = gradInput->size(1); THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe) && (target->size(1) == dim), 3, "inconsistent target size"); THArgCheck(!istarget->is_empty() && (istarget->dim() == 2) && (istarget->size(0) == nframe) && (istarget->size(1) == dim), 3, "inconsistent isTarget size"); dim3 blocks(gradInput->size(0)); dim3 threads(MULTILABELMARGIN_THREADS); hipLaunchKernelGGL(( cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), gradInput->size(0), gradInput->size(1), reduction == Reduction::Mean, reduction != Reduction::None); } else AT_ERROR("non-empty vector or matrix expected, got size: ", gradInput->sizes()); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); THCIndexTensor_(free)(state, target); THCTensor_(free)(state, istarget); THCTensor_(free)(state, gradOutput); } #endif
12389cc0597d29c175029b8957db65799ab2fd36.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/MultiLabelMarginCriterion.cu" #else // TODO: improve error messages void THNN_(MultiLabelMarginCriterion_updateOutput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *output, THCTensor *istarget, int64_t reduction) { input = THCTensor_(newContiguous)(state, input); target = THCIndexTensor_(newContiguous)(state, target); istarget = THCTensor_(newContiguous)(state, istarget); THCTensor_(resizeAs)(state, istarget, input); if(input->dim() == 1) { int dim = input->size(0); THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == dim), 3, "inconsistent target size"); THCTensor_(resize1d)(state, output, 1); dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), 1, dim, reduction == Reduction::Mean ); THCudaCheck(cudaGetLastError()); } else if(input->dim() == 2) { int nframe = input->size(0); int dim = input->size(1); THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe) && (target->size(1) == dim), 3, "inconsistent target size"); dim3 blocks(input->size(0)); dim3 threads(MULTILABELMARGIN_THREADS); if (reduction != Reduction::None) { THCTensor *output_tmp = THCTensor_(newWithSize1d)(state, input->size(0)); THCTensor_(resize1d)(state, output, 1); cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, output_tmp), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), nframe, dim, reduction == Reduction::Mean ); THCudaCheck(cudaGetLastError()); THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, 
scalar_t>::to(THCTensor_(sumall)(state, output_tmp))); THCTensor_(free)(state, output_tmp); } else { THCTensor_(resize1d)(state, output, input->size(0)); cunn_MultiLabelMarginCriterion_updateOutput_kernel<scalar_t, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), nframe, dim, false ); THCudaCheck(cudaGetLastError()); } } else AT_ERROR("non-empty vector or matrix expected, got size: ", input->sizes()); THCTensor_(free)(state, input); THCIndexTensor_(free)(state, target); THCTensor_(free)(state, istarget); } void THNN_(MultiLabelMarginCriterion_updateGradInput)( THCState *state, THCTensor *input, THCIndexTensor *target, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *istarget, int64_t reduction) { input = THCTensor_(newContiguous)(state, input); target = THCIndexTensor_(newContiguous)(state, target); istarget = THCTensor_(newContiguous)(state, istarget); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); if(gradInput->dim() == 1) { int dim = gradInput->size(0); THArgCheck(!target->is_empty() && (target->dim() == 1) && (target->size(0) == dim), 3, "inconsistent target size"); THArgCheck(!istarget->is_empty() && (istarget->dim() == 1) && (istarget->size(0) == dim), 3, "inconsistent isTarget size"); dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), 1, gradInput->size(0), reduction == Reduction::Mean, reduction != Reduction::None); } else if(gradInput->dim() == 2) { int nframe = gradInput->size(0); int dim = gradInput->size(1); 
THArgCheck(!target->is_empty() && (target->dim() == 2) && (target->size(0) == nframe) && (target->size(1) == dim), 3, "inconsistent target size"); THArgCheck(!istarget->is_empty() && (istarget->dim() == 2) && (istarget->size(0) == nframe) && (istarget->size(1) == dim), 3, "inconsistent isTarget size"); dim3 blocks(gradInput->size(0)); dim3 threads(MULTILABELMARGIN_THREADS); cunn_MultiLabelMarginCriterion_updateGradInput_kernel<scalar_t, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, gradOutput), THCTensor_(data)(state, input), THCIndexTensor_(data)(state, target), THCTensor_(data)(state, istarget), gradInput->size(0), gradInput->size(1), reduction == Reduction::Mean, reduction != Reduction::None); } else AT_ERROR("non-empty vector or matrix expected, got size: ", gradInput->sizes()); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); THCIndexTensor_(free)(state, target); THCTensor_(free)(state, istarget); THCTensor_(free)(state, gradOutput); } #endif
90aaae71803df348d8e3cfbcf5a394943a8940c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*------------------------------------------------------------------------ CUDA C extention for Python Provides functionality for PET image reconstruction. Copyrights: 2018-2020 Pawel Markiewicz 2020 Casper da Costa-Luis ------------------------------------------------------------------------*/ #include "recon.h" #include <cassert> #define FLOAT_WITHIN_EPS(x) (-0.000001f < x && x < 0.000001f) /// z: how many Z-slices to add __global__ void pad(float *dst, float *src, const int z) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= SZ_IMX) return; int j = threadIdx.y + blockDim.y * blockIdx.y; if (j >= SZ_IMY) return; src += i * SZ_IMY * SZ_IMZ + j * SZ_IMZ; dst += i * SZ_IMY * (SZ_IMZ + z) + j * (SZ_IMZ + z); for (int k = 0; k < SZ_IMZ; ++k) dst[k] = src[k]; } void d_pad(float *dst, float *src, const int z = COLUMNS_BLOCKDIM_X - SZ_IMZ % COLUMNS_BLOCKDIM_X) { HANDLE_ERROR(hipMemset(dst, 0, SZ_IMX * SZ_IMY * (SZ_IMZ + z) * sizeof(float))); dim3 BpG((SZ_IMX + NIPET_CU_THREADS / 32 - 1) / (NIPET_CU_THREADS / 32), (SZ_IMY + 31) / 32); dim3 TpB(NIPET_CU_THREADS / 32, 32); hipLaunchKernelGGL(( pad), dim3(BpG), dim3(TpB), 0, 0, dst, src, z); } /// z: how many Z-slices to remove __global__ void unpad(float *dst, float *src, const int z) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= SZ_IMX) return; int j = threadIdx.y + blockDim.y * blockIdx.y; if (j >= SZ_IMY) return; dst += i * SZ_IMY * SZ_IMZ + j * SZ_IMZ; src += i * SZ_IMY * (SZ_IMZ + z) + j * (SZ_IMZ + z); for (int k = 0; k < SZ_IMZ; ++k) dst[k] = src[k]; } void d_unpad(float *dst, float *src, const int z = COLUMNS_BLOCKDIM_X - SZ_IMZ % COLUMNS_BLOCKDIM_X) { dim3 BpG((SZ_IMX + NIPET_CU_THREADS / 32 - 1) / (NIPET_CU_THREADS / 32), (SZ_IMY + 31) / 32); dim3 TpB(NIPET_CU_THREADS / 32, 32); hipLaunchKernelGGL(( unpad), dim3(BpG), dim3(TpB), 0, 0, dst, src, z); } /** separable convolution */ /// Convolution kernel array 
__constant__ float c_Kernel[3 * KERNEL_LENGTH]; void setConvolutionKernel(float *krnl) { // krnl: separable three kernels for x, y and z hipMemcpyToSymbol(c_Kernel, krnl, 3 * KERNEL_LENGTH * sizeof(float)); } /// sigma: Gaussian sigma void setKernelGaussian(float sigma) { float knlRM[KERNEL_LENGTH * 3]; const double tmpE = -1.0 / (2 * sigma * sigma); for (int i = 0; i < KERNEL_LENGTH; ++i) knlRM[i] = (float)exp(tmpE * pow(RSZ_PSF_KRNL - i, 2)); // normalise double knlSum = 0; for (size_t i = 0; i < KERNEL_LENGTH; ++i) knlSum += knlRM[i]; for (size_t i = 0; i < KERNEL_LENGTH; ++i) { knlRM[i] /= knlSum; // also fill in other dimensions knlRM[i + KERNEL_LENGTH] = knlRM[i]; knlRM[i + KERNEL_LENGTH * 2] = knlRM[i]; } setConvolutionKernel(knlRM); } /// Row convolution filter __global__ void cnv_rows(float *d_Dst, float *d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[ROWS_BLOCKDIM_Y] [(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; // Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; // Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } // Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } // Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } // Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -RSZ_PSF_KRNL; j <= RSZ_PSF_KRNL; j++) { sum += c_Kernel[RSZ_PSF_KRNL - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } /// Column convolution filter __global__ void cnv_columns(float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, int offKrnl // kernel offset for asymmetric kernels // x, y, z (still the same dims though) ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X] [(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; // Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; // Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } // Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } // Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } // Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -RSZ_PSF_KRNL; j <= RSZ_PSF_KRNL; j++) { sum += c_Kernel[offKrnl + RSZ_PSF_KRNL - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } /// d_buff: temporary image buffer void d_conv(float *d_buff, float *d_imgout, float *d_imgint, int Nvk, int Nvj, int Nvi) { assert(d_imgout != d_imgint); assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= RSZ_PSF_KRNL); assert(Nvk % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(Nvj % ROWS_BLOCKDIM_Y == 0); assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= RSZ_PSF_KRNL); assert(Nvk % COLUMNS_BLOCKDIM_X == 0); assert(Nvj % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); assert(Nvi % COLUMNS_BLOCKDIM_X == 0); HANDLE_ERROR(hipMemset(d_imgout, 0, Nvk * Nvj * Nvi * sizeof(float))); // perform smoothing for (int k = 0; k < Nvk; k++) { //------ ROWS ------- dim3 blocks(Nvi / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), Nvj / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); hipLaunchKernelGGL(( cnv_rows), dim3(blocks), dim3(threads), 0, 0, d_imgout + k * Nvi * Nvj, d_imgint + k * Nvi * Nvj, Nvi, Nvj, Nvi); HANDLE_ERROR(hipGetLastError()); //----- COLUMNS ---- dim3 blocks2(Nvi / COLUMNS_BLOCKDIM_X, Nvj / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads2(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); hipLaunchKernelGGL(( cnv_columns), dim3(blocks2), dim3(threads2), 0, 0, d_buff + k * Nvi * Nvj, d_imgout + k * Nvi * Nvj, Nvi, Nvj, Nvi, KERNEL_LENGTH); HANDLE_ERROR(hipGetLastError()); } //----- THIRD DIM ---- for (int j = 0; j < Nvj; j++) { dim3 blocks3(Nvi / COLUMNS_BLOCKDIM_X, Nvk / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads3(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); hipLaunchKernelGGL(( cnv_columns), 
dim3(blocks3), dim3(threads3), 0, 0, d_imgout + j * Nvi, d_buff + j * Nvi, Nvi, Nvk, Nvi * Nvj, 2 * KERNEL_LENGTH); HANDLE_ERROR(hipGetLastError()); } } /** end of separable convolution */ //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - // Element-wise multiplication __global__ void elmult(float *inA, float *inB, int length) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < length) inA[idx] *= inB[idx]; } void d_elmult(float *d_inA, float *d_inB, int length) { dim3 BpG((length + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, 1, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); hipLaunchKernelGGL(( elmult), dim3(BpG), dim3(TpB), 0, 0, d_inA, d_inB, length); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - // Element-wise division with result stored in first input variable __global__ void eldiv0(float *inA, float *inB, int length) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= length) return; if (FLOAT_WITHIN_EPS(inB[idx])) inA[idx] = 0; else inA[idx] /= inB[idx]; } void d_eldiv(float *d_inA, float *d_inB, int length) { dim3 BpG((length + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, 1, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); hipLaunchKernelGGL(( eldiv0), dim3(BpG), dim3(TpB), 0, 0, d_inA, d_inB, length); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - __global__ void sneldiv(float *inA, unsigned short *inB, int *sub, int Nprj, int snno) { int idz = threadIdx.x + blockDim.x * blockIdx.x; if (!(blockIdx.y < Nprj && idz < snno)) return; // inA > only active bins of the subset // inB > all sinogram bins float b = (float)inB[snno * sub[blockIdx.y] + idz]; if (FLOAT_WITHIN_EPS(inA[snno * blockIdx.y + idz])) b = 0; else b /= inA[snno * blockIdx.y + idz]; // sub[blockIdx.y] inA[snno * blockIdx.y + idz] = b; // sub[blockIdx.y] } void d_sneldiv(float *d_inA, unsigned short *d_inB, int *d_sub, int Nprj, int snno) { 
dim3 BpG((snno + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, Nprj, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); hipLaunchKernelGGL(( sneldiv), dim3(BpG), dim3(TpB), 0, 0, d_inA, d_inB, d_sub, Nprj, snno); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - __global__ void sneladd(float *inA, float *inB, int *sub, int Nprj, int snno) { int idz = threadIdx.x + blockDim.x * blockIdx.x; if (blockIdx.y < Nprj && idz < snno) inA[snno * blockIdx.y + idz] += inB[snno * sub[blockIdx.y] + idz]; // sub[blockIdx.y] } void d_sneladd(float *d_inA, float *d_inB, int *d_sub, int Nprj, int snno) { dim3 BpG((snno + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, Nprj, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); hipLaunchKernelGGL(( sneladd), dim3(BpG), dim3(TpB), 0, 0, d_inA, d_inB, d_sub, Nprj, snno); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - __global__ void eladd(float *inA, float *inB, int length) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < length) inA[idx] += inB[idx]; } void d_eladd(float *d_inA, float *d_inB, int length) { dim3 BpG((length + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, 1, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); hipLaunchKernelGGL(( eladd), dim3(BpG), dim3(TpB), 0, 0, d_inA, d_inB, length); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - __global__ void elmsk(float *inA, float *inB, bool *msk, int length) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < length) { if (msk[idx]) inA[idx] *= inB[idx]; else inA[idx] = 0; } } void d_elmsk(float *d_inA, float *d_inB, bool *d_msk, int length) { dim3 BpG((length + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, 1, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); hipLaunchKernelGGL(( elmsk), dim3(BpG), dim3(TpB), 0, 0, d_inA, d_inB, d_msk, length); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - void 
osem(float *imgout, bool *rncmsk, unsigned short *psng, float *rsng, float *ssng, float *nsng, float *asng, int *subs, float *sensimg, float *krnl, float *li2rng, short *li2sn, char *li2nos, short *s2c, float *crs, int Nsub, int Nprj, int N0crs, Cnst Cnt) { int dev_id; hipGetDevice(&dev_id); if (Cnt.LOG <= LOGDEBUG) printf("i> using CUDA device #%d\n", dev_id); //--- TRANSAXIAL COMPONENT float4 *d_crs; HANDLE_ERROR(hipMalloc(&d_crs, N0crs * sizeof(float4))); HANDLE_ERROR(hipMemcpy(d_crs, crs, N0crs * sizeof(float4), hipMemcpyHostToDevice)); short2 *d_s2c; HANDLE_ERROR(hipMalloc(&d_s2c, AW * sizeof(short2))); HANDLE_ERROR(hipMemcpy(d_s2c, s2c, AW * sizeof(short2), hipMemcpyHostToDevice)); float *d_tt; HANDLE_ERROR(hipMalloc(&d_tt, N_TT * AW * sizeof(float))); unsigned char *d_tv; HANDLE_ERROR(hipMalloc(&d_tv, N_TV * AW * sizeof(unsigned char))); HANDLE_ERROR(hipMemset(d_tv, 0, N_TV * AW * sizeof(unsigned char))); //------------------------------------------------- gpu_siddon_tx(d_crs, d_s2c, d_tt, d_tv); //------------------------------------------------- // array of subset projection bins int *d_subs; HANDLE_ERROR(hipMalloc(&d_subs, Nsub * Nprj * sizeof(int))); HANDLE_ERROR(hipMemcpy(d_subs, subs, Nsub * Nprj * sizeof(int), hipMemcpyHostToDevice)); //--- // number of sinos short snno = -1; if (Cnt.SPN == 1) snno = NSINOS; else if (Cnt.SPN == 11) snno = NSINOS11; // full sinos (3D) unsigned short *d_psng; HANDLE_ERROR(hipMalloc(&d_psng, AW * snno * sizeof(unsigned short))); HANDLE_ERROR( hipMemcpy(d_psng, psng, AW * snno * sizeof(unsigned short), hipMemcpyHostToDevice)); float *d_rsng; HANDLE_ERROR(hipMalloc(&d_rsng, AW * snno * sizeof(float))); HANDLE_ERROR(hipMemcpy(d_rsng, rsng, AW * snno * sizeof(float), hipMemcpyHostToDevice)); float *d_ssng; HANDLE_ERROR(hipMalloc(&d_ssng, AW * snno * sizeof(float))); HANDLE_ERROR(hipMemcpy(d_ssng, ssng, AW * snno * sizeof(float), hipMemcpyHostToDevice)); // add scatter and randoms together d_eladd(d_rsng, d_ssng, snno * AW); 
hipFree(d_ssng); float *d_nsng; HANDLE_ERROR(hipMalloc(&d_nsng, AW * snno * sizeof(float))); HANDLE_ERROR(hipMemcpy(d_nsng, nsng, AW * snno * sizeof(float), hipMemcpyHostToDevice)); // join norm and attenuation factors float *d_ansng; HANDLE_ERROR(hipMalloc(&d_ansng, snno * AW * sizeof(float))); HANDLE_ERROR(hipMemcpy(d_ansng, asng, snno * AW * sizeof(float), hipMemcpyHostToDevice)); // combine attenuation and normalisation in one sinogram d_elmult(d_ansng, d_nsng, snno * AW); hipFree(d_nsng); // divide randoms+scatter by attenuation and norm factors d_eldiv(d_rsng, d_ansng, snno * AW); float *d_imgout; HANDLE_ERROR(hipMalloc(&d_imgout, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float))); HANDLE_ERROR(hipMemcpy(d_imgout, imgout, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float), hipMemcpyHostToDevice)); bool *d_rcnmsk; HANDLE_ERROR(hipMalloc(&d_rcnmsk, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(bool))); HANDLE_ERROR(hipMemcpy(d_rcnmsk, rncmsk, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(bool), hipMemcpyHostToDevice)); // allocate sino for estimation (esng) float *d_esng; HANDLE_ERROR(hipMalloc(&d_esng, Nprj * snno * sizeof(float))); //--sensitivity image (images for all subsets) float *d_sensim; HANDLE_ERROR(hipMalloc(&d_sensim, Nsub * SZ_IMZ * SZ_IMX * SZ_IMY * sizeof(float))); HANDLE_ERROR(hipMemcpy(d_sensim, sensimg, Nsub * SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float), hipMemcpyHostToDevice)); // hipMemset(d_sensim, 0, Nsub * SZ_IMZ*SZ_IMX*SZ_IMY*sizeof(float)); // for(int i=0; i<Nsub; i++){ // rec_bprj(&d_sensim[i*SZ_IMZ*SZ_IMX*SZ_IMY], d_ansng, &d_subs[i*Nprj+1], subs[i*Nprj], // d_tt, d_tv, li2rng, li2sn, li2nos, span); // } // //~~~~testing // printf("-->> The sensitivity pointer has size of %d and it's value is %lu \n", // sizeof(d_sensim), &d_sensim); // //~~~~ // resolution modelling kernel setConvolutionKernel(krnl); float *d_convTmp; HANDLE_ERROR(hipMalloc(&d_convTmp, SZ_IMX * SZ_IMY * (SZ_IMZ + 1) * sizeof(float))); float *d_convSrc; HANDLE_ERROR(hipMalloc(&d_convSrc, SZ_IMX * SZ_IMY * 
(SZ_IMZ + 1) * sizeof(float))); float *d_convDst; HANDLE_ERROR(hipMalloc(&d_convDst, SZ_IMX * SZ_IMY * (SZ_IMZ + 1) * sizeof(float))); // resolution modelling sensitivity image for (int i = 0; i < Nsub && krnl[0] >= 0; i++) { d_pad(d_convSrc, &d_sensim[i * SZ_IMZ * SZ_IMX * SZ_IMY]); d_conv(d_convTmp, d_convDst, d_convSrc, SZ_IMX, SZ_IMY, SZ_IMZ + 1); d_unpad(&d_sensim[i * SZ_IMZ * SZ_IMX * SZ_IMY], d_convDst); } // resolution modelling image float *d_imgout_rm; HANDLE_ERROR(hipMalloc(&d_imgout_rm, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float))); //--back-propagated image float *d_bimg; HANDLE_ERROR(hipMalloc(&d_bimg, SZ_IMY * SZ_IMY * SZ_IMZ * sizeof(float))); if (Cnt.LOG <= LOGDEBUG) printf("i> loaded variables in device memory for image reconstruction.\n"); getMemUse(Cnt); for (int i = 0; i < Nsub; i++) { if (Cnt.LOG <= LOGDEBUG) printf("<> subset %d-th <>\n", i); // resolution modelling current image if (krnl[0] >= 0) { d_pad(d_convSrc, d_imgout); d_conv(d_convTmp, d_convDst, d_convSrc, SZ_IMX, SZ_IMY, SZ_IMZ + 1); d_unpad(d_imgout_rm, d_convDst); } // forward project hipMemset(d_esng, 0, Nprj * snno * sizeof(float)); rec_fprj(d_esng, krnl[0] >= 0 ? 
d_imgout_rm : d_imgout, &d_subs[i * Nprj + 1], subs[i * Nprj], d_tt, d_tv, li2rng, li2sn, li2nos, Cnt); // add the randoms+scatter d_sneladd(d_esng, d_rsng, &d_subs[i * Nprj + 1], subs[i * Nprj], snno); // divide to get the correction d_sneldiv(d_esng, d_psng, &d_subs[i * Nprj + 1], subs[i * Nprj], snno); // back-project the correction hipMemset(d_bimg, 0, SZ_IMZ * SZ_IMX * SZ_IMY * sizeof(float)); rec_bprj(d_bimg, d_esng, &d_subs[i * Nprj + 1], subs[i * Nprj], d_tt, d_tv, li2rng, li2sn, li2nos, Cnt); // resolution modelling backprojection if (krnl[0] >= 0) { d_pad(d_convSrc, d_bimg); d_conv(d_convTmp, d_convDst, d_convSrc, SZ_IMX, SZ_IMY, SZ_IMZ + 1); d_unpad(d_bimg, d_convDst); } // divide by sensitivity image d_eldiv(d_bimg, &d_sensim[i * SZ_IMZ * SZ_IMX * SZ_IMY], SZ_IMZ * SZ_IMX * SZ_IMY); // apply the recon mask to the back-projected image d_elmsk(d_imgout, d_bimg, d_rcnmsk, SZ_IMZ * SZ_IMX * SZ_IMY); } HANDLE_ERROR(hipMemcpy(imgout, d_imgout, SZ_IMZ * SZ_IMX * SZ_IMY * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_crs); hipFree(d_s2c); hipFree(d_tt); hipFree(d_tv); hipFree(d_subs); hipFree(d_psng); hipFree(d_rsng); hipFree(d_ansng); hipFree(d_esng); hipFree(d_sensim); hipFree(d_convTmp); hipFree(d_convSrc); hipFree(d_convDst); hipFree(d_imgout); hipFree(d_imgout_rm); hipFree(d_bimg); hipFree(d_rcnmsk); }
90aaae71803df348d8e3cfbcf5a394943a8940c7.cu
/*------------------------------------------------------------------------ CUDA C extention for Python Provides functionality for PET image reconstruction. Copyrights: 2018-2020 Pawel Markiewicz 2020 Casper da Costa-Luis ------------------------------------------------------------------------*/ #include "recon.h" #include <cassert> #define FLOAT_WITHIN_EPS(x) (-0.000001f < x && x < 0.000001f) /// z: how many Z-slices to add __global__ void pad(float *dst, float *src, const int z) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= SZ_IMX) return; int j = threadIdx.y + blockDim.y * blockIdx.y; if (j >= SZ_IMY) return; src += i * SZ_IMY * SZ_IMZ + j * SZ_IMZ; dst += i * SZ_IMY * (SZ_IMZ + z) + j * (SZ_IMZ + z); for (int k = 0; k < SZ_IMZ; ++k) dst[k] = src[k]; } void d_pad(float *dst, float *src, const int z = COLUMNS_BLOCKDIM_X - SZ_IMZ % COLUMNS_BLOCKDIM_X) { HANDLE_ERROR(cudaMemset(dst, 0, SZ_IMX * SZ_IMY * (SZ_IMZ + z) * sizeof(float))); dim3 BpG((SZ_IMX + NIPET_CU_THREADS / 32 - 1) / (NIPET_CU_THREADS / 32), (SZ_IMY + 31) / 32); dim3 TpB(NIPET_CU_THREADS / 32, 32); pad<<<BpG, TpB>>>(dst, src, z); } /// z: how many Z-slices to remove __global__ void unpad(float *dst, float *src, const int z) { int i = threadIdx.x + blockDim.x * blockIdx.x; if (i >= SZ_IMX) return; int j = threadIdx.y + blockDim.y * blockIdx.y; if (j >= SZ_IMY) return; dst += i * SZ_IMY * SZ_IMZ + j * SZ_IMZ; src += i * SZ_IMY * (SZ_IMZ + z) + j * (SZ_IMZ + z); for (int k = 0; k < SZ_IMZ; ++k) dst[k] = src[k]; } void d_unpad(float *dst, float *src, const int z = COLUMNS_BLOCKDIM_X - SZ_IMZ % COLUMNS_BLOCKDIM_X) { dim3 BpG((SZ_IMX + NIPET_CU_THREADS / 32 - 1) / (NIPET_CU_THREADS / 32), (SZ_IMY + 31) / 32); dim3 TpB(NIPET_CU_THREADS / 32, 32); unpad<<<BpG, TpB>>>(dst, src, z); } /** separable convolution */ /// Convolution kernel array __constant__ float c_Kernel[3 * KERNEL_LENGTH]; void setConvolutionKernel(float *krnl) { // krnl: separable three kernels for x, y and z 
cudaMemcpyToSymbol(c_Kernel, krnl, 3 * KERNEL_LENGTH * sizeof(float)); } /// sigma: Gaussian sigma void setKernelGaussian(float sigma) { float knlRM[KERNEL_LENGTH * 3]; const double tmpE = -1.0 / (2 * sigma * sigma); for (int i = 0; i < KERNEL_LENGTH; ++i) knlRM[i] = (float)exp(tmpE * pow(RSZ_PSF_KRNL - i, 2)); // normalise double knlSum = 0; for (size_t i = 0; i < KERNEL_LENGTH; ++i) knlSum += knlRM[i]; for (size_t i = 0; i < KERNEL_LENGTH; ++i) { knlRM[i] /= knlSum; // also fill in other dimensions knlRM[i + KERNEL_LENGTH] = knlRM[i]; knlRM[i + KERNEL_LENGTH * 2] = knlRM[i]; } setConvolutionKernel(knlRM); } /// Row convolution filter __global__ void cnv_rows(float *d_Dst, float *d_Src, int imageW, int imageH, int pitch) { __shared__ float s_Data[ROWS_BLOCKDIM_Y] [(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X]; // Offset to the left halo edge const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; // Load main data #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X]; } // Load left halo #pragma unroll for (int i = 0; i < ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0; } // Load right halo #pragma unroll for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++) { s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? 
d_Src[i * ROWS_BLOCKDIM_X] : 0; } // Compute and store results __syncthreads(); #pragma unroll for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -RSZ_PSF_KRNL; j <= RSZ_PSF_KRNL; j++) { sum += c_Kernel[RSZ_PSF_KRNL - j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j]; } d_Dst[i * ROWS_BLOCKDIM_X] = sum; } } /// Column convolution filter __global__ void cnv_columns(float *d_Dst, float *d_Src, int imageW, int imageH, int pitch, int offKrnl // kernel offset for asymmetric kernels // x, y, z (still the same dims though) ) { __shared__ float s_Data[COLUMNS_BLOCKDIM_X] [(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1]; // Offset to the upper halo edge const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x; const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y; d_Src += baseY * pitch + baseX; d_Dst += baseY * pitch + baseX; // Main data #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch]; } // Upper halo #pragma unroll for (int i = 0; i < COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } // Lower halo #pragma unroll for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++) { s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? 
d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0; } // Compute and store results __syncthreads(); #pragma unroll for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++) { float sum = 0; #pragma unroll for (int j = -RSZ_PSF_KRNL; j <= RSZ_PSF_KRNL; j++) { sum += c_Kernel[offKrnl + RSZ_PSF_KRNL - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j]; } d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum; } } /// d_buff: temporary image buffer void d_conv(float *d_buff, float *d_imgout, float *d_imgint, int Nvk, int Nvj, int Nvi) { assert(d_imgout != d_imgint); assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= RSZ_PSF_KRNL); assert(Nvk % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0); assert(Nvj % ROWS_BLOCKDIM_Y == 0); assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= RSZ_PSF_KRNL); assert(Nvk % COLUMNS_BLOCKDIM_X == 0); assert(Nvj % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0); assert(Nvi % COLUMNS_BLOCKDIM_X == 0); HANDLE_ERROR(cudaMemset(d_imgout, 0, Nvk * Nvj * Nvi * sizeof(float))); // perform smoothing for (int k = 0; k < Nvk; k++) { //------ ROWS ------- dim3 blocks(Nvi / (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X), Nvj / ROWS_BLOCKDIM_Y); dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y); cnv_rows<<<blocks, threads>>>(d_imgout + k * Nvi * Nvj, d_imgint + k * Nvi * Nvj, Nvi, Nvj, Nvi); HANDLE_ERROR(cudaGetLastError()); //----- COLUMNS ---- dim3 blocks2(Nvi / COLUMNS_BLOCKDIM_X, Nvj / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads2(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); cnv_columns<<<blocks2, threads2>>>(d_buff + k * Nvi * Nvj, d_imgout + k * Nvi * Nvj, Nvi, Nvj, Nvi, KERNEL_LENGTH); HANDLE_ERROR(cudaGetLastError()); } //----- THIRD DIM ---- for (int j = 0; j < Nvj; j++) { dim3 blocks3(Nvi / COLUMNS_BLOCKDIM_X, Nvk / (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y)); dim3 threads3(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y); cnv_columns<<<blocks3, threads3>>>(d_imgout + j * Nvi, d_buff + j * Nvi, Nvi, Nvk, Nvi * Nvj, 2 * KERNEL_LENGTH); 
HANDLE_ERROR(cudaGetLastError()); } } /** end of separable convolution */ //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - // Element-wise multiplication __global__ void elmult(float *inA, float *inB, int length) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < length) inA[idx] *= inB[idx]; } void d_elmult(float *d_inA, float *d_inB, int length) { dim3 BpG((length + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, 1, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); elmult<<<BpG, TpB>>>(d_inA, d_inB, length); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - // Element-wise division with result stored in first input variable __global__ void eldiv0(float *inA, float *inB, int length) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= length) return; if (FLOAT_WITHIN_EPS(inB[idx])) inA[idx] = 0; else inA[idx] /= inB[idx]; } void d_eldiv(float *d_inA, float *d_inB, int length) { dim3 BpG((length + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, 1, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); eldiv0<<<BpG, TpB>>>(d_inA, d_inB, length); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - __global__ void sneldiv(float *inA, unsigned short *inB, int *sub, int Nprj, int snno) { int idz = threadIdx.x + blockDim.x * blockIdx.x; if (!(blockIdx.y < Nprj && idz < snno)) return; // inA > only active bins of the subset // inB > all sinogram bins float b = (float)inB[snno * sub[blockIdx.y] + idz]; if (FLOAT_WITHIN_EPS(inA[snno * blockIdx.y + idz])) b = 0; else b /= inA[snno * blockIdx.y + idz]; // sub[blockIdx.y] inA[snno * blockIdx.y + idz] = b; // sub[blockIdx.y] } void d_sneldiv(float *d_inA, unsigned short *d_inB, int *d_sub, int Nprj, int snno) { dim3 BpG((snno + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, Nprj, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); sneldiv<<<BpG, TpB>>>(d_inA, d_inB, d_sub, Nprj, snno); } //- - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - __global__ void sneladd(float *inA, float *inB, int *sub, int Nprj, int snno) { int idz = threadIdx.x + blockDim.x * blockIdx.x; if (blockIdx.y < Nprj && idz < snno) inA[snno * blockIdx.y + idz] += inB[snno * sub[blockIdx.y] + idz]; // sub[blockIdx.y] } void d_sneladd(float *d_inA, float *d_inB, int *d_sub, int Nprj, int snno) { dim3 BpG((snno + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, Nprj, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); sneladd<<<BpG, TpB>>>(d_inA, d_inB, d_sub, Nprj, snno); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - __global__ void eladd(float *inA, float *inB, int length) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < length) inA[idx] += inB[idx]; } void d_eladd(float *d_inA, float *d_inB, int length) { dim3 BpG((length + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, 1, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); eladd<<<BpG, TpB>>>(d_inA, d_inB, length); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - __global__ void elmsk(float *inA, float *inB, bool *msk, int length) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < length) { if (msk[idx]) inA[idx] *= inB[idx]; else inA[idx] = 0; } } void d_elmsk(float *d_inA, float *d_inB, bool *d_msk, int length) { dim3 BpG((length + NIPET_CU_THREADS - 1) / NIPET_CU_THREADS, 1, 1); dim3 TpB(NIPET_CU_THREADS, 1, 1); elmsk<<<BpG, TpB>>>(d_inA, d_inB, d_msk, length); } //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - void osem(float *imgout, bool *rncmsk, unsigned short *psng, float *rsng, float *ssng, float *nsng, float *asng, int *subs, float *sensimg, float *krnl, float *li2rng, short *li2sn, char *li2nos, short *s2c, float *crs, int Nsub, int Nprj, int N0crs, Cnst Cnt) { int dev_id; cudaGetDevice(&dev_id); if (Cnt.LOG <= LOGDEBUG) printf("i> using 
CUDA device #%d\n", dev_id); //--- TRANSAXIAL COMPONENT float4 *d_crs; HANDLE_ERROR(cudaMalloc(&d_crs, N0crs * sizeof(float4))); HANDLE_ERROR(cudaMemcpy(d_crs, crs, N0crs * sizeof(float4), cudaMemcpyHostToDevice)); short2 *d_s2c; HANDLE_ERROR(cudaMalloc(&d_s2c, AW * sizeof(short2))); HANDLE_ERROR(cudaMemcpy(d_s2c, s2c, AW * sizeof(short2), cudaMemcpyHostToDevice)); float *d_tt; HANDLE_ERROR(cudaMalloc(&d_tt, N_TT * AW * sizeof(float))); unsigned char *d_tv; HANDLE_ERROR(cudaMalloc(&d_tv, N_TV * AW * sizeof(unsigned char))); HANDLE_ERROR(cudaMemset(d_tv, 0, N_TV * AW * sizeof(unsigned char))); //------------------------------------------------- gpu_siddon_tx(d_crs, d_s2c, d_tt, d_tv); //------------------------------------------------- // array of subset projection bins int *d_subs; HANDLE_ERROR(cudaMalloc(&d_subs, Nsub * Nprj * sizeof(int))); HANDLE_ERROR(cudaMemcpy(d_subs, subs, Nsub * Nprj * sizeof(int), cudaMemcpyHostToDevice)); //--- // number of sinos short snno = -1; if (Cnt.SPN == 1) snno = NSINOS; else if (Cnt.SPN == 11) snno = NSINOS11; // full sinos (3D) unsigned short *d_psng; HANDLE_ERROR(cudaMalloc(&d_psng, AW * snno * sizeof(unsigned short))); HANDLE_ERROR( cudaMemcpy(d_psng, psng, AW * snno * sizeof(unsigned short), cudaMemcpyHostToDevice)); float *d_rsng; HANDLE_ERROR(cudaMalloc(&d_rsng, AW * snno * sizeof(float))); HANDLE_ERROR(cudaMemcpy(d_rsng, rsng, AW * snno * sizeof(float), cudaMemcpyHostToDevice)); float *d_ssng; HANDLE_ERROR(cudaMalloc(&d_ssng, AW * snno * sizeof(float))); HANDLE_ERROR(cudaMemcpy(d_ssng, ssng, AW * snno * sizeof(float), cudaMemcpyHostToDevice)); // add scatter and randoms together d_eladd(d_rsng, d_ssng, snno * AW); cudaFree(d_ssng); float *d_nsng; HANDLE_ERROR(cudaMalloc(&d_nsng, AW * snno * sizeof(float))); HANDLE_ERROR(cudaMemcpy(d_nsng, nsng, AW * snno * sizeof(float), cudaMemcpyHostToDevice)); // join norm and attenuation factors float *d_ansng; HANDLE_ERROR(cudaMalloc(&d_ansng, snno * AW * sizeof(float))); 
HANDLE_ERROR(cudaMemcpy(d_ansng, asng, snno * AW * sizeof(float), cudaMemcpyHostToDevice)); // combine attenuation and normalisation in one sinogram d_elmult(d_ansng, d_nsng, snno * AW); cudaFree(d_nsng); // divide randoms+scatter by attenuation and norm factors d_eldiv(d_rsng, d_ansng, snno * AW); float *d_imgout; HANDLE_ERROR(cudaMalloc(&d_imgout, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float))); HANDLE_ERROR(cudaMemcpy(d_imgout, imgout, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float), cudaMemcpyHostToDevice)); bool *d_rcnmsk; HANDLE_ERROR(cudaMalloc(&d_rcnmsk, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(bool))); HANDLE_ERROR(cudaMemcpy(d_rcnmsk, rncmsk, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(bool), cudaMemcpyHostToDevice)); // allocate sino for estimation (esng) float *d_esng; HANDLE_ERROR(cudaMalloc(&d_esng, Nprj * snno * sizeof(float))); //--sensitivity image (images for all subsets) float *d_sensim; HANDLE_ERROR(cudaMalloc(&d_sensim, Nsub * SZ_IMZ * SZ_IMX * SZ_IMY * sizeof(float))); HANDLE_ERROR(cudaMemcpy(d_sensim, sensimg, Nsub * SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float), cudaMemcpyHostToDevice)); // cudaMemset(d_sensim, 0, Nsub * SZ_IMZ*SZ_IMX*SZ_IMY*sizeof(float)); // for(int i=0; i<Nsub; i++){ // rec_bprj(&d_sensim[i*SZ_IMZ*SZ_IMX*SZ_IMY], d_ansng, &d_subs[i*Nprj+1], subs[i*Nprj], // d_tt, d_tv, li2rng, li2sn, li2nos, span); // } // //~~~~testing // printf("-->> The sensitivity pointer has size of %d and it's value is %lu \n", // sizeof(d_sensim), &d_sensim); // //~~~~ // resolution modelling kernel setConvolutionKernel(krnl); float *d_convTmp; HANDLE_ERROR(cudaMalloc(&d_convTmp, SZ_IMX * SZ_IMY * (SZ_IMZ + 1) * sizeof(float))); float *d_convSrc; HANDLE_ERROR(cudaMalloc(&d_convSrc, SZ_IMX * SZ_IMY * (SZ_IMZ + 1) * sizeof(float))); float *d_convDst; HANDLE_ERROR(cudaMalloc(&d_convDst, SZ_IMX * SZ_IMY * (SZ_IMZ + 1) * sizeof(float))); // resolution modelling sensitivity image for (int i = 0; i < Nsub && krnl[0] >= 0; i++) { d_pad(d_convSrc, &d_sensim[i * SZ_IMZ * SZ_IMX * SZ_IMY]); 
d_conv(d_convTmp, d_convDst, d_convSrc, SZ_IMX, SZ_IMY, SZ_IMZ + 1); d_unpad(&d_sensim[i * SZ_IMZ * SZ_IMX * SZ_IMY], d_convDst); } // resolution modelling image float *d_imgout_rm; HANDLE_ERROR(cudaMalloc(&d_imgout_rm, SZ_IMX * SZ_IMY * SZ_IMZ * sizeof(float))); //--back-propagated image float *d_bimg; HANDLE_ERROR(cudaMalloc(&d_bimg, SZ_IMY * SZ_IMY * SZ_IMZ * sizeof(float))); if (Cnt.LOG <= LOGDEBUG) printf("i> loaded variables in device memory for image reconstruction.\n"); getMemUse(Cnt); for (int i = 0; i < Nsub; i++) { if (Cnt.LOG <= LOGDEBUG) printf("<> subset %d-th <>\n", i); // resolution modelling current image if (krnl[0] >= 0) { d_pad(d_convSrc, d_imgout); d_conv(d_convTmp, d_convDst, d_convSrc, SZ_IMX, SZ_IMY, SZ_IMZ + 1); d_unpad(d_imgout_rm, d_convDst); } // forward project cudaMemset(d_esng, 0, Nprj * snno * sizeof(float)); rec_fprj(d_esng, krnl[0] >= 0 ? d_imgout_rm : d_imgout, &d_subs[i * Nprj + 1], subs[i * Nprj], d_tt, d_tv, li2rng, li2sn, li2nos, Cnt); // add the randoms+scatter d_sneladd(d_esng, d_rsng, &d_subs[i * Nprj + 1], subs[i * Nprj], snno); // divide to get the correction d_sneldiv(d_esng, d_psng, &d_subs[i * Nprj + 1], subs[i * Nprj], snno); // back-project the correction cudaMemset(d_bimg, 0, SZ_IMZ * SZ_IMX * SZ_IMY * sizeof(float)); rec_bprj(d_bimg, d_esng, &d_subs[i * Nprj + 1], subs[i * Nprj], d_tt, d_tv, li2rng, li2sn, li2nos, Cnt); // resolution modelling backprojection if (krnl[0] >= 0) { d_pad(d_convSrc, d_bimg); d_conv(d_convTmp, d_convDst, d_convSrc, SZ_IMX, SZ_IMY, SZ_IMZ + 1); d_unpad(d_bimg, d_convDst); } // divide by sensitivity image d_eldiv(d_bimg, &d_sensim[i * SZ_IMZ * SZ_IMX * SZ_IMY], SZ_IMZ * SZ_IMX * SZ_IMY); // apply the recon mask to the back-projected image d_elmsk(d_imgout, d_bimg, d_rcnmsk, SZ_IMZ * SZ_IMX * SZ_IMY); } HANDLE_ERROR(cudaMemcpy(imgout, d_imgout, SZ_IMZ * SZ_IMX * SZ_IMY * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_crs); cudaFree(d_s2c); cudaFree(d_tt); cudaFree(d_tv); 
cudaFree(d_subs); cudaFree(d_psng); cudaFree(d_rsng); cudaFree(d_ansng); cudaFree(d_esng); cudaFree(d_sensim); cudaFree(d_convTmp); cudaFree(d_convSrc); cudaFree(d_convDst); cudaFree(d_imgout); cudaFree(d_imgout_rm); cudaFree(d_bimg); cudaFree(d_rcnmsk); }
4fe906b5726f4bf11e76e4a312ec2c57b762852a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"cuda_runtime.h" #include"cuda.h" #include"hip/hip_runtime_api.h" #include<iostream> #include<fstream> #include<cstdio> #include<vector> #include<set> #include<map> #include<bitset> #include "error.h" #include "time.h" #include "ResizableArray.h" #include "device_launch_parameters.h" #define THREADNUM 1024 #define BLOCKNUM 20 int countforgpu=0; int countforcpu=0; struct ItemDetail{ int id; int realId; vector<int> tid; ItemDetail(int i = -1, int r = -1){ id = i; realId = r; } }; struct Item{ int id; int* db; int support; Item(int i, int*d, int s){ id = i; db = d; support = s; } }; struct EClass{ vector<Item> items; vector<int> parents; }; const unsigned int Bit32Table[32] = { 2147483648UL, 1073741824UL, 536870912UL, 268435456UL, 134217728, 67108864, 33554432, 16777216, 8388608, 4194304, 2097152, 1048576, 524288, 262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1 }; const int SIZE_OF_INT = sizeof(int)* 8; using namespace std; void ReadInput(FILE *inputFile, int *tNum, int *iNum, int *&index, float supPer, EClass* &root); void mineGPU(EClass* eClass, int minSup, int* index, int length); void mineCPU(EClass* eClass, int minSup, int* index, int length); int NumberOfSetBits(int i); __global__ void kunion(int *output,int *a,int length,int size,int ids,int blocknum,int threadnum); auto out = &cout; int main(int argc, char** argv){ clock_t tProgramStart = clock(); bool cpu = true; bool gpu = true; char* inFileName = NULL; // the input file name float supPer = 0;// user specified minimun support percentage if ( argc != 4){//input argument wrong, print usage, return error; ErrorHandler(ERROR_INPUT); } //set arguments inFileName = argv[1]; if ((supPer = atof(argv[2])) == 0 || supPer > 100 || supPer < 0) ErrorHandler(ERROR_MINSUP); ofstream ofs; ofs.open(argv[3], ofstream::out | ofstream::trunc); out = &ofs; cout << "inFileName = " << 
inFileName << endl; cout << "minSup = " << supPer << endl; FILE *inputFile; // input file pointer int tNumbers = 0; // Transaction numbers int iNumbers = 0; // Item numbers int *index = NULL; // the index of item in the database, cause we only want to leave the items that are frequent EClass *root = new EClass(); if ((inputFile = fopen(inFileName, "r")) == 0) ErrorHandler(ERROR_INFILE); ReadInput(inputFile, &tNumbers, &iNumbers, index, supPer, root); int length = tNumbers + SIZE_OF_INT - (tNumbers%SIZE_OF_INT); length /= SIZE_OF_INT; int minSup = tNumbers * supPer + 1; if (gpu){ clock_t tGPUMiningStart = clock(); mineGPU(root, minSup, index, length); cout << "Time on GPU Mining: " << (double)(clock() - tGPUMiningStart) / CLOCKS_PER_SEC << endl; } if (cpu){ clock_t tCPUMiningStart = clock(); mineCPU(root, minSup, index, length); cout << "Time on CPU Mining: " << (double)(clock() - tCPUMiningStart) / CLOCKS_PER_SEC << endl; } for (auto item : root->items){ delete[] item.db; } delete root; delete index; return 0; } /** * Read the input from database and store it in memory * Would filter the items without minimun support * * @params * inputFile: input file pointer * tNum: record the transaction numbers * iNum: record the item numbers * index: conversion from id to real id, used for result output * supPer: minimun support percentage * eNum: record the effective item numbers (item with support > minimun support) */ void ReadInput(FILE *inputFile, int *tNum, int *iNum, int *&index, float supPer, EClass*&root){ *tNum = 0; map<int, ItemDetail> mapIndex; // store the real id of items and the corresponding ItemDetail. 
char c = 0; int temp = 0; // read db and convert horizontal database to vertical database and store in the vector of the item in the map while ((c = getc(inputFile)) != EOF){ if (c == ' ' || c == ',' || c == '\n'){ if (mapIndex.find(temp) == mapIndex.end()){ mapIndex[temp] = ItemDetail(0, temp); mapIndex[temp].tid.push_back(*tNum); } else mapIndex.find(temp)->second.tid.push_back(*tNum); temp = 0; if (c == '\n') (*tNum)++; } else if (47 < c <58){ temp *= 10; temp += c - 48; } } //remove the item without minimun support int minSup = (*tNum)*supPer + 1; for (map<int, ItemDetail>::iterator it = mapIndex.begin(); it != mapIndex.end();){ if (it->second.tid.size() < minSup) { map<int, ItemDetail>::iterator toErase = it; ++it; mapIndex.erase(toErase); } else ++it; } // convert the tidset into bit vector and store in db, build index int bitLength = (*tNum) + SIZE_OF_INT - (*tNum) % SIZE_OF_INT; temp = 0; index = new int[mapIndex.size()]; for (map<int, ItemDetail>::iterator it = mapIndex.begin(); it != mapIndex.end(); ++it){ it->second.id = temp; index[temp] = it->second.realId; //int * bitVector = (db + temp * bitLength / SIZE_OF_INT); int* bitVector = new int[bitLength / SIZE_OF_INT]; memset(bitVector, 0, sizeof(int)* bitLength / SIZE_OF_INT); for (int i = it->second.tid.size() - 1; i >= 0; i--){ bitVector[it->second.tid[i] / SIZE_OF_INT] |= Bit32Table[it->second.tid[i] % SIZE_OF_INT]; } (*root).items.push_back(Item(temp, bitVector, it->second.tid.size())); temp++; } *iNum = mapIndex.size(); } /** * Mining Frequent itemset on GPU * * @Params * eClass: pointer to the equivalent class to explore * minSup: minimun support * index: array that map item id to real id, used for result output * length: the length of tidset in integer * */ __global__ void kunion(int *output,int *a,int length,int size,int ids,int blocknum,int threadnum) { const int bid=blockIdx.x; const int tid=threadIdx.x; int i; for(i=(ids+1)*length+bid * threadnum + tid;i<length*size;i+=BLOCKNUM * THREADNUM) { 
int j = i % length; output[i]=a[i]&a[ids*length+j]; } __syncthreads(); } void mineGPU(EClass *eClass, int minSup, int* index, int length){ int size = eClass->items.size(); countforgpu+=1; //printf("gpu here %d =%d \n",countforgpu,size); int *cudaforadb; // adb is for *a int *cudaforoutdb; hipMalloc((void**) &cudaforadb,(size)*sizeof(int)*length); hipMalloc((void**) &cudaforoutdb,(size)*sizeof(int)*length); for(int ide=0;ide<size;ide++) { hipMemcpy(cudaforadb+ide*length, eClass->items[ide].db, sizeof(int)*length, hipMemcpyHostToDevice); } for (int i = 0; i < size; i++){ EClass* children = new EClass(); children->parents = eClass->parents; children->parents.push_back(eClass->items[i].id); int *a = eClass->items[i].db; int support = 0; //cout<<"i here "<<i<<endl; // bdb is for *b //outdb is for answer; int *tempout = new int[size*length]; //cout<<"alive and i = "<<i<<endl; //hipMalloc((void**) &cudaforbdb,(size)*sizeof(int)*length); //hipMalloc((void**) &cudaforoutdb,(size)*sizeof(int)*length); /* int j=i+1; for(int ide2=0;ide2<size-i-1;ide2++) { hipMemcpy(cudaforbdb+ide2*length, eClass->items[j].db, sizeof(int)*length, hipMemcpyHostToDevice); j=j+1; } */ hipLaunchKernelGGL(( kunion), dim3(BLOCKNUM),dim3(THREADNUM), 0, 0, cudaforoutdb,cudaforadb,length,size,i,BLOCKNUM,THREADNUM); hipDeviceSynchronize(); hipMemcpy(tempout+(i+1)*length, cudaforoutdb+(i+1)*length, (size-i-1)*sizeof(int)*length, hipMemcpyDeviceToHost); //hipDeviceSynchronize(); for(int k=(i+1);k<size;k++) { int *temp2 = new int[length]; support=0; memcpy(temp2,&tempout[k*length],length*sizeof(int)); for(int l=0;l<length;l++) { //if(tempout[k*length+l]!=0) // cout<<"temp here"<<tempout[k*length+l]<<endl; support += NumberOfSetBits(temp2[l]); //if (support!=0) // cout<<"it's support="<<support<<endl; } //cout<<"true support"<<support<<endl; if (support >= minSup) { //cout<<"this is gpu support"<<support<<endl; //cout<<"id= "<<eClass->items[k].id<<endl; children->items.push_back(Item(eClass->items[k].id, 
temp2, support)); } else delete[] temp2; } /* for(int ide=i+1;ide<size;ide++) { hipMemcpy(cudaforbdb+(ide-1)*length,eclass->items[ide].db,sizeof(int)*length,hipMemcpyHostToDevice); } */ //hipFree(cudaforadb); //hipFree(cudaforoutdb); delete[] tempout; if (children->items.size() != 0) mineGPU(children, minSup, index, length); for (auto item : children->items){ delete[] item.db; } delete children; } hipFree(cudaforadb); hipFree(cudaforoutdb); for (auto item : eClass->items){ for (auto i : eClass->parents) *out << index[i] << " "; *out << index[item.id] << "(" << item.support << ")" << endl; } /* ./fim.out retail.txt 0.001 outfile int size = eClass->items.size(); countforgpu+=1; printf("here %d =%d \n",countforgpu,size); for (int i = 0; i < size; i++){ EClass* children = new EClass(); children->parents = eClass->parents; children->parents.push_back(eClass->items[i].id); int *cudaforadb; // adb is for *a hipMalloc((void**) &cudaforadb,sizeof(int)*length); int *a = eClass->items[i].db; hipMemcpy(cudaforadb, a, sizeof(int)*length, hipMemcpyHostToDevice); for (int j = i + 1; j < size; j++){ int * temp = new int[length]; int *b = eClass->items[j].db; int support = 0; int *cudaforbdb; // bdb is for *b int *cudaforoutdb;//outdb is for answer; int *tempout = new int[length]; hipMalloc((void**) &cudaforbdb,sizeof(int)*length); hipMalloc((void**) &cudaforoutdb,sizeof(int)*length); hipMemcpy(cudaforbdb, b, sizeof(int)*length, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kunion), dim3(BLOCKNUM),dim3(THREADNUM), 0, 0, cudaforoutdb,cudaforadb,cudaforbdb,length,BLOCKNUM,THREADNUM); hipMemcpy(tempout, cudaforoutdb, sizeof(int)*length, hipMemcpyDeviceToHost); for (int k = 0; k < length; k++){ //temp[k] = a[k] & b[k]; support += NumberOfSetBits(tempout[k]); } hipFree(cudaforbdb); hipFree(cudaforoutdb); if (support >= minSup){ children->items.push_back(Item(eClass->items[j].id, tempout, support)); } else delete[] tempout; } hipFree(cudaforadb); if (children->items.size() != 0) 
mineGPU(children, minSup, index, length); for (auto item : children->items){ delete[] item.db; } delete children; } */ } void mineCPU(EClass *eClass, int minSup, int* index, int length){ int size = eClass->items.size(); countforcpu=countforcpu+1; //printf("cpu here%d = %d \n",countforcpu,size); for (int i = 0; i < size; i++){ EClass* children = new EClass(); children->parents = eClass->parents; children->parents.push_back(eClass->items[i].id); int *a = eClass->items[i].db; for (int j = i + 1; j < size; j++){ int * temp = new int[length]; int *b = eClass->items[j].db; int support = 0; for (int k = 0; k < length; k++){ temp[k] = a[k] & b[k]; support += NumberOfSetBits(temp[k]); //cout<<"tempk="<<support<<endl; } if (support >= minSup){ //cout<<"this is cpu support"<<support<<endl; //cout<<"cpu id = "<<eClass->items[j].id<<endl; children->items.push_back(Item(eClass->items[j].id, temp, support)); } else delete[] temp; } if (children->items.size() != 0) mineCPU(children, minSup, index, length); for (auto item : children->items){ delete[] item.db; } delete children; } /* for (auto item : eClass->items){ for (auto i : eClass->parents) *out << index[i] << " "; *out << index[item.id] << "(" << item.support << ")" << endl; } */ } int NumberOfSetBits(int i) { i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; }
4fe906b5726f4bf11e76e4a312ec2c57b762852a.cu
#include"cuda_runtime.h" #include"cuda.h" #include"cuda_runtime_api.h" #include<iostream> #include<fstream> #include<cstdio> #include<vector> #include<set> #include<map> #include<bitset> #include "error.h" #include "time.h" #include "ResizableArray.h" #include "device_launch_parameters.h" #define THREADNUM 1024 #define BLOCKNUM 20 int countforgpu=0; int countforcpu=0; struct ItemDetail{ int id; int realId; vector<int> tid; ItemDetail(int i = -1, int r = -1){ id = i; realId = r; } }; struct Item{ int id; int* db; int support; Item(int i, int*d, int s){ id = i; db = d; support = s; } }; struct EClass{ vector<Item> items; vector<int> parents; }; const unsigned int Bit32Table[32] = { 2147483648UL, 1073741824UL, 536870912UL, 268435456UL, 134217728, 67108864, 33554432, 16777216, 8388608, 4194304, 2097152, 1048576, 524288, 262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048, 1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1 }; const int SIZE_OF_INT = sizeof(int)* 8; using namespace std; void ReadInput(FILE *inputFile, int *tNum, int *iNum, int *&index, float supPer, EClass* &root); void mineGPU(EClass* eClass, int minSup, int* index, int length); void mineCPU(EClass* eClass, int minSup, int* index, int length); int NumberOfSetBits(int i); __global__ void kunion(int *output,int *a,int length,int size,int ids,int blocknum,int threadnum); auto out = &cout; int main(int argc, char** argv){ clock_t tProgramStart = clock(); bool cpu = true; bool gpu = true; char* inFileName = NULL; // the input file name float supPer = 0;// user specified minimun support percentage if ( argc != 4){//input argument wrong, print usage, return error; ErrorHandler(ERROR_INPUT); } //set arguments inFileName = argv[1]; if ((supPer = atof(argv[2])) == 0 || supPer > 100 || supPer < 0) ErrorHandler(ERROR_MINSUP); ofstream ofs; ofs.open(argv[3], ofstream::out | ofstream::trunc); out = &ofs; cout << "inFileName = " << inFileName << endl; cout << "minSup = " << supPer << endl; FILE *inputFile; // input file 
pointer int tNumbers = 0; // Transaction numbers int iNumbers = 0; // Item numbers int *index = NULL; // the index of item in the database, cause we only want to leave the items that are frequent EClass *root = new EClass(); if ((inputFile = fopen(inFileName, "r")) == 0) ErrorHandler(ERROR_INFILE); ReadInput(inputFile, &tNumbers, &iNumbers, index, supPer, root); int length = tNumbers + SIZE_OF_INT - (tNumbers%SIZE_OF_INT); length /= SIZE_OF_INT; int minSup = tNumbers * supPer + 1; if (gpu){ clock_t tGPUMiningStart = clock(); mineGPU(root, minSup, index, length); cout << "Time on GPU Mining: " << (double)(clock() - tGPUMiningStart) / CLOCKS_PER_SEC << endl; } if (cpu){ clock_t tCPUMiningStart = clock(); mineCPU(root, minSup, index, length); cout << "Time on CPU Mining: " << (double)(clock() - tCPUMiningStart) / CLOCKS_PER_SEC << endl; } for (auto item : root->items){ delete[] item.db; } delete root; delete index; return 0; } /** * Read the input from database and store it in memory * Would filter the items without minimun support * * @params * inputFile: input file pointer * tNum: record the transaction numbers * iNum: record the item numbers * index: conversion from id to real id, used for result output * supPer: minimun support percentage * eNum: record the effective item numbers (item with support > minimun support) */ void ReadInput(FILE *inputFile, int *tNum, int *iNum, int *&index, float supPer, EClass*&root){ *tNum = 0; map<int, ItemDetail> mapIndex; // store the real id of items and the corresponding ItemDetail. 
char c = 0; int temp = 0; // read db and convert horizontal database to vertical database and store in the vector of the item in the map while ((c = getc(inputFile)) != EOF){ if (c == ' ' || c == ',' || c == '\n'){ if (mapIndex.find(temp) == mapIndex.end()){ mapIndex[temp] = ItemDetail(0, temp); mapIndex[temp].tid.push_back(*tNum); } else mapIndex.find(temp)->second.tid.push_back(*tNum); temp = 0; if (c == '\n') (*tNum)++; } else if (47 < c <58){ temp *= 10; temp += c - 48; } } //remove the item without minimun support int minSup = (*tNum)*supPer + 1; for (map<int, ItemDetail>::iterator it = mapIndex.begin(); it != mapIndex.end();){ if (it->second.tid.size() < minSup) { map<int, ItemDetail>::iterator toErase = it; ++it; mapIndex.erase(toErase); } else ++it; } // convert the tidset into bit vector and store in db, build index int bitLength = (*tNum) + SIZE_OF_INT - (*tNum) % SIZE_OF_INT; temp = 0; index = new int[mapIndex.size()]; for (map<int, ItemDetail>::iterator it = mapIndex.begin(); it != mapIndex.end(); ++it){ it->second.id = temp; index[temp] = it->second.realId; //int * bitVector = (db + temp * bitLength / SIZE_OF_INT); int* bitVector = new int[bitLength / SIZE_OF_INT]; memset(bitVector, 0, sizeof(int)* bitLength / SIZE_OF_INT); for (int i = it->second.tid.size() - 1; i >= 0; i--){ bitVector[it->second.tid[i] / SIZE_OF_INT] |= Bit32Table[it->second.tid[i] % SIZE_OF_INT]; } (*root).items.push_back(Item(temp, bitVector, it->second.tid.size())); temp++; } *iNum = mapIndex.size(); } /** * Mining Frequent itemset on GPU * * @Params * eClass: pointer to the equivalent class to explore * minSup: minimun support * index: array that map item id to real id, used for result output * length: the length of tidset in integer * */ __global__ void kunion(int *output,int *a,int length,int size,int ids,int blocknum,int threadnum) { const int bid=blockIdx.x; const int tid=threadIdx.x; int i; for(i=(ids+1)*length+bid * threadnum + tid;i<length*size;i+=BLOCKNUM * THREADNUM) { 
int j = i % length; output[i]=a[i]&a[ids*length+j]; } __syncthreads(); } void mineGPU(EClass *eClass, int minSup, int* index, int length){ int size = eClass->items.size(); countforgpu+=1; //printf("gpu here %d =%d \n",countforgpu,size); int *cudaforadb; // adb is for *a int *cudaforoutdb; cudaMalloc((void**) &cudaforadb,(size)*sizeof(int)*length); cudaMalloc((void**) &cudaforoutdb,(size)*sizeof(int)*length); for(int ide=0;ide<size;ide++) { cudaMemcpy(cudaforadb+ide*length, eClass->items[ide].db, sizeof(int)*length, cudaMemcpyHostToDevice); } for (int i = 0; i < size; i++){ EClass* children = new EClass(); children->parents = eClass->parents; children->parents.push_back(eClass->items[i].id); int *a = eClass->items[i].db; int support = 0; //cout<<"i here "<<i<<endl; // bdb is for *b //outdb is for answer; int *tempout = new int[size*length]; //cout<<"alive and i = "<<i<<endl; //cudaMalloc((void**) &cudaforbdb,(size)*sizeof(int)*length); //cudaMalloc((void**) &cudaforoutdb,(size)*sizeof(int)*length); /* int j=i+1; for(int ide2=0;ide2<size-i-1;ide2++) { cudaMemcpy(cudaforbdb+ide2*length, eClass->items[j].db, sizeof(int)*length, cudaMemcpyHostToDevice); j=j+1; } */ kunion<<<BLOCKNUM,THREADNUM>>>(cudaforoutdb,cudaforadb,length,size,i,BLOCKNUM,THREADNUM); cudaDeviceSynchronize(); cudaMemcpy(tempout+(i+1)*length, cudaforoutdb+(i+1)*length, (size-i-1)*sizeof(int)*length, cudaMemcpyDeviceToHost); //cudaDeviceSynchronize(); for(int k=(i+1);k<size;k++) { int *temp2 = new int[length]; support=0; memcpy(temp2,&tempout[k*length],length*sizeof(int)); for(int l=0;l<length;l++) { //if(tempout[k*length+l]!=0) // cout<<"temp here"<<tempout[k*length+l]<<endl; support += NumberOfSetBits(temp2[l]); //if (support!=0) // cout<<"it's support="<<support<<endl; } //cout<<"true support"<<support<<endl; if (support >= minSup) { //cout<<"this is gpu support"<<support<<endl; //cout<<"id= "<<eClass->items[k].id<<endl; children->items.push_back(Item(eClass->items[k].id, temp2, support)); } else 
delete[] temp2; } /* for(int ide=i+1;ide<size;ide++) { cudaMemcpy(cudaforbdb+(ide-1)*length,eclass->items[ide].db,sizeof(int)*length,cudaMemcpyHostToDevice); } */ //cudaFree(cudaforadb); //cudaFree(cudaforoutdb); delete[] tempout; if (children->items.size() != 0) mineGPU(children, minSup, index, length); for (auto item : children->items){ delete[] item.db; } delete children; } cudaFree(cudaforadb); cudaFree(cudaforoutdb); for (auto item : eClass->items){ for (auto i : eClass->parents) *out << index[i] << " "; *out << index[item.id] << "(" << item.support << ")" << endl; } /* ./fim.out retail.txt 0.001 outfile int size = eClass->items.size(); countforgpu+=1; printf("here %d =%d \n",countforgpu,size); for (int i = 0; i < size; i++){ EClass* children = new EClass(); children->parents = eClass->parents; children->parents.push_back(eClass->items[i].id); int *cudaforadb; // adb is for *a cudaMalloc((void**) &cudaforadb,sizeof(int)*length); int *a = eClass->items[i].db; cudaMemcpy(cudaforadb, a, sizeof(int)*length, cudaMemcpyHostToDevice); for (int j = i + 1; j < size; j++){ int * temp = new int[length]; int *b = eClass->items[j].db; int support = 0; int *cudaforbdb; // bdb is for *b int *cudaforoutdb;//outdb is for answer; int *tempout = new int[length]; cudaMalloc((void**) &cudaforbdb,sizeof(int)*length); cudaMalloc((void**) &cudaforoutdb,sizeof(int)*length); cudaMemcpy(cudaforbdb, b, sizeof(int)*length, cudaMemcpyHostToDevice); kunion<<<BLOCKNUM,THREADNUM>>>(cudaforoutdb,cudaforadb,cudaforbdb,length,BLOCKNUM,THREADNUM); cudaMemcpy(tempout, cudaforoutdb, sizeof(int)*length, cudaMemcpyDeviceToHost); for (int k = 0; k < length; k++){ //temp[k] = a[k] & b[k]; support += NumberOfSetBits(tempout[k]); } cudaFree(cudaforbdb); cudaFree(cudaforoutdb); if (support >= minSup){ children->items.push_back(Item(eClass->items[j].id, tempout, support)); } else delete[] tempout; } cudaFree(cudaforadb); if (children->items.size() != 0) mineGPU(children, minSup, index, length); for (auto 
item : children->items){ delete[] item.db; } delete children; } */ } void mineCPU(EClass *eClass, int minSup, int* index, int length){ int size = eClass->items.size(); countforcpu=countforcpu+1; //printf("cpu here%d = %d \n",countforcpu,size); for (int i = 0; i < size; i++){ EClass* children = new EClass(); children->parents = eClass->parents; children->parents.push_back(eClass->items[i].id); int *a = eClass->items[i].db; for (int j = i + 1; j < size; j++){ int * temp = new int[length]; int *b = eClass->items[j].db; int support = 0; for (int k = 0; k < length; k++){ temp[k] = a[k] & b[k]; support += NumberOfSetBits(temp[k]); //cout<<"tempk="<<support<<endl; } if (support >= minSup){ //cout<<"this is cpu support"<<support<<endl; //cout<<"cpu id = "<<eClass->items[j].id<<endl; children->items.push_back(Item(eClass->items[j].id, temp, support)); } else delete[] temp; } if (children->items.size() != 0) mineCPU(children, minSup, index, length); for (auto item : children->items){ delete[] item.db; } delete children; } /* for (auto item : eClass->items){ for (auto i : eClass->parents) *out << index[i] << " "; *out << index[item.id] << "(" << item.support << ")" << endl; } */ } int NumberOfSetBits(int i) { i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; }
48b1bcc7a882ca67fe4bdc65c12f139b1e0275e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "ctcgreedydecoder_impl.cuh" #include <hipcub/hipcub.hpp> #include <algorithm> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" template <typename T> __global__ void CTCGreedyDecoder(const T *input, const int bound, const size_t outer_size, const size_t batch_size, int64_t *decoded_values_temp, T *log_probability) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < outer_size; pos += gridDim.x * blockDim.x) { int idx = 0; size_t input_offset = pos * bound; T max_data = input[input_offset]; for (int i = 1; i < bound; i++) { input_offset = pos * bound + i; auto input_data = input[input_offset]; if (input_data > max_data) { idx = i; max_data = input_data; } } decoded_values_temp[pos] = idx; log_probability[pos] = -max_data; } return; } template <typename T> __global__ void values_merge(int64_t *decoded_values_temp, const int32_t *sequence_length, const size_t batch_size, const int bound, const bool merge_ok, T *log_probability, int64_t *nums_count) { const int blank_idx = bound - 1; for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < batch_size; pos += gridDim.x * blockDim.x) { if (sequence_length[pos] <= 0) { nums_count[pos] = 0; log_probability[pos] = 0; nums_count[pos] = 0; return; } size_t cnt = 0; for (size_t i = 0, idx = pos; i < 
sequence_length[pos]; i++, idx += batch_size) { if (idx != pos) { log_probability[pos] += log_probability[idx]; } if (decoded_values_temp[idx] == blank_idx || merge_ok && idx != pos && decoded_values_temp[idx] == decoded_values_temp[idx - batch_size]) { continue; } decoded_values_temp[cnt * batch_size + pos] = decoded_values_temp[idx]; cnt++; } nums_count[pos] = cnt; } return; } __global__ void indicesCompute(const int64_t *decoded_values_temp, const int64_t *nums_count, const size_t batch_size, int64_t *decoded_indices, int64_t *decoded_values, int64_t *decoded_shape, int64_t *nums_count_pre_sum) { for (size_t batch_pos = blockIdx.y * blockDim.y + threadIdx.y; batch_pos < batch_size; batch_pos += gridDim.y * blockDim.y) { for (size_t nums_count_pos = threadIdx.x; nums_count_pos < nums_count[batch_pos]; nums_count_pos += blockDim.x) { decoded_indices[(nums_count_pre_sum[batch_pos] + nums_count_pos) * 2] = batch_pos; decoded_indices[(nums_count_pre_sum[batch_pos] + nums_count_pos) * 2 + 1] = nums_count_pos; decoded_values[nums_count_pre_sum[batch_pos] + nums_count_pos] = decoded_values_temp[nums_count_pos * batch_size + batch_pos]; } if (threadIdx.x == 0) { MsAtomicMax(decoded_shape + 1, nums_count[batch_pos]); } } decoded_shape[0] = batch_size; } template <typename T> void CalCTCGreedyDecoder(const T *input, const int bound, const size_t outer_size, const size_t batch_size, int64_t *decoded_values_temp, T *log_probability, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( CTCGreedyDecoder), dim3(CUDA_BLOCKS(device_id, outer_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, input, bound, outer_size, batch_size, decoded_values_temp, log_probability); return; } template <typename T> void Calmerge(int64_t *decoded_values_temp, const int32_t *sequence_length, const size_t batch_size, const int bound, const bool merge_ok, T *log_probability, int64_t *nums_count, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( 
values_merge), dim3(CUDA_BLOCKS(device_id, batch_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, decoded_values_temp, sequence_length, batch_size, bound, merge_ok, log_probability, nums_count); return; } int64_t Calindices(const int64_t *decoded_values_temp, const int64_t *nums_count, const size_t batch_size, int64_t *decoded_indices, int64_t *decoded_values, int64_t *decoded_shape, const uint32_t &device_id, hipStream_t cuda_stream) { size_t temp_storage_bytes = 0; int64_t *nums_count_pre_sum = nullptr; hipMalloc(&nums_count_pre_sum, sizeof(int64_t) * (batch_size + 1)); hipMemset(nums_count_pre_sum, 0, sizeof(int64_t) * (batch_size + 1)); hipMemset(decoded_shape, 0, sizeof(int64_t) * 2); (void)hipcub::DeviceScan::InclusiveSum(nullptr, temp_storage_bytes, nums_count, nums_count_pre_sum + 1, static_cast<int64_t>(batch_size), cuda_stream); void *d_temp_storage = nullptr; hipStreamSynchronize(cuda_stream); (void)hipMalloc(&d_temp_storage, temp_storage_bytes); (void)hipcub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, nums_count, nums_count_pre_sum + 1, static_cast<int64_t>(batch_size), cuda_stream); hipStreamSynchronize(cuda_stream); (void)hipFree(d_temp_storage); int64_t sum_num_count = 0; hipMemcpy(&sum_num_count, nums_count_pre_sum + batch_size, sizeof(int64_t), hipMemcpyDeviceToHost); int64_t avg_num_count = sum_num_count / batch_size == 0 ? 1 : sum_num_count / batch_size; size_t thread_x_num = avg_num_count > 32 ? 
32 : avg_num_count; size_t thread_y_num = 512 / thread_x_num; dim3 thread_num(thread_x_num, thread_y_num); hipDeviceProp_t prop; (void)hipGetDeviceProperties(&prop, device_id); int max_blocks = prop.multiProcessorCount; int block_num = min(static_cast<int>(((avg_num_count * batch_size - 1) / (thread_x_num * thread_y_num)) + 1), max_blocks); hipLaunchKernelGGL(( indicesCompute), dim3(block_num), dim3(thread_num), 0, cuda_stream, decoded_values_temp, nums_count, batch_size, decoded_indices, decoded_values, decoded_shape, nums_count_pre_sum); hipFree(nums_count_pre_sum); return sum_num_count; } template CUDA_LIB_EXPORT void CalCTCGreedyDecoder<float>(const float *input, const int bound, const size_t outer_size, const size_t batch_size, int64_t *decoded_values_temp, float *log_probability, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCTCGreedyDecoder<double>(const double *input, const int bound, const size_t outer_size, const size_t batch_size, int64_t *decoded_values_temp, double *log_probability, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Calmerge<float>(int64_t *decoded_values_temp, const int32_t *sequence_length, const size_t batch_size, const int bound, const bool merge_ok, float *log_probability, int64_t *nums_count, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void Calmerge<double>(int64_t *decoded_values_temp, const int32_t *sequence_length, const size_t batch_size, const int bound, const bool merge_ok, double *log_probability, int64_t *nums_count, const uint32_t &device_id, hipStream_t cuda_stream);
48b1bcc7a882ca67fe4bdc65c12f139b1e0275e2.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "ctcgreedydecoder_impl.cuh" #include <cub/cub.cuh> #include <algorithm> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" template <typename T> __global__ void CTCGreedyDecoder(const T *input, const int bound, const size_t outer_size, const size_t batch_size, int64_t *decoded_values_temp, T *log_probability) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < outer_size; pos += gridDim.x * blockDim.x) { int idx = 0; size_t input_offset = pos * bound; T max_data = input[input_offset]; for (int i = 1; i < bound; i++) { input_offset = pos * bound + i; auto input_data = input[input_offset]; if (input_data > max_data) { idx = i; max_data = input_data; } } decoded_values_temp[pos] = idx; log_probability[pos] = -max_data; } return; } template <typename T> __global__ void values_merge(int64_t *decoded_values_temp, const int32_t *sequence_length, const size_t batch_size, const int bound, const bool merge_ok, T *log_probability, int64_t *nums_count) { const int blank_idx = bound - 1; for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < batch_size; pos += gridDim.x * blockDim.x) { if (sequence_length[pos] <= 0) { nums_count[pos] = 0; log_probability[pos] = 0; nums_count[pos] = 0; return; } size_t cnt = 0; for (size_t i = 0, idx = pos; i < sequence_length[pos]; i++, idx += batch_size) { if (idx != pos) { log_probability[pos] += 
log_probability[idx]; } if (decoded_values_temp[idx] == blank_idx || merge_ok && idx != pos && decoded_values_temp[idx] == decoded_values_temp[idx - batch_size]) { continue; } decoded_values_temp[cnt * batch_size + pos] = decoded_values_temp[idx]; cnt++; } nums_count[pos] = cnt; } return; } __global__ void indicesCompute(const int64_t *decoded_values_temp, const int64_t *nums_count, const size_t batch_size, int64_t *decoded_indices, int64_t *decoded_values, int64_t *decoded_shape, int64_t *nums_count_pre_sum) { for (size_t batch_pos = blockIdx.y * blockDim.y + threadIdx.y; batch_pos < batch_size; batch_pos += gridDim.y * blockDim.y) { for (size_t nums_count_pos = threadIdx.x; nums_count_pos < nums_count[batch_pos]; nums_count_pos += blockDim.x) { decoded_indices[(nums_count_pre_sum[batch_pos] + nums_count_pos) * 2] = batch_pos; decoded_indices[(nums_count_pre_sum[batch_pos] + nums_count_pos) * 2 + 1] = nums_count_pos; decoded_values[nums_count_pre_sum[batch_pos] + nums_count_pos] = decoded_values_temp[nums_count_pos * batch_size + batch_pos]; } if (threadIdx.x == 0) { MsAtomicMax(decoded_shape + 1, nums_count[batch_pos]); } } decoded_shape[0] = batch_size; } template <typename T> void CalCTCGreedyDecoder(const T *input, const int bound, const size_t outer_size, const size_t batch_size, int64_t *decoded_values_temp, T *log_probability, const uint32_t &device_id, cudaStream_t cuda_stream) { CTCGreedyDecoder<<<CUDA_BLOCKS(device_id, outer_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( input, bound, outer_size, batch_size, decoded_values_temp, log_probability); return; } template <typename T> void Calmerge(int64_t *decoded_values_temp, const int32_t *sequence_length, const size_t batch_size, const int bound, const bool merge_ok, T *log_probability, int64_t *nums_count, const uint32_t &device_id, cudaStream_t cuda_stream) { values_merge<<<CUDA_BLOCKS(device_id, batch_size), CUDA_THREADS(device_id), 0, cuda_stream>>>( decoded_values_temp, sequence_length, batch_size, 
bound, merge_ok, log_probability, nums_count); return; } int64_t Calindices(const int64_t *decoded_values_temp, const int64_t *nums_count, const size_t batch_size, int64_t *decoded_indices, int64_t *decoded_values, int64_t *decoded_shape, const uint32_t &device_id, cudaStream_t cuda_stream) { size_t temp_storage_bytes = 0; int64_t *nums_count_pre_sum = nullptr; cudaMalloc(&nums_count_pre_sum, sizeof(int64_t) * (batch_size + 1)); cudaMemset(nums_count_pre_sum, 0, sizeof(int64_t) * (batch_size + 1)); cudaMemset(decoded_shape, 0, sizeof(int64_t) * 2); (void)cub::DeviceScan::InclusiveSum(nullptr, temp_storage_bytes, nums_count, nums_count_pre_sum + 1, static_cast<int64_t>(batch_size), cuda_stream); void *d_temp_storage = nullptr; cudaStreamSynchronize(cuda_stream); (void)cudaMalloc(&d_temp_storage, temp_storage_bytes); (void)cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, nums_count, nums_count_pre_sum + 1, static_cast<int64_t>(batch_size), cuda_stream); cudaStreamSynchronize(cuda_stream); (void)cudaFree(d_temp_storage); int64_t sum_num_count = 0; cudaMemcpy(&sum_num_count, nums_count_pre_sum + batch_size, sizeof(int64_t), cudaMemcpyDeviceToHost); int64_t avg_num_count = sum_num_count / batch_size == 0 ? 1 : sum_num_count / batch_size; size_t thread_x_num = avg_num_count > 32 ? 
32 : avg_num_count; size_t thread_y_num = 512 / thread_x_num; dim3 thread_num(thread_x_num, thread_y_num); cudaDeviceProp prop; (void)cudaGetDeviceProperties(&prop, device_id); int max_blocks = prop.multiProcessorCount; int block_num = min(static_cast<int>(((avg_num_count * batch_size - 1) / (thread_x_num * thread_y_num)) + 1), max_blocks); indicesCompute<<<block_num, thread_num, 0, cuda_stream>>>(decoded_values_temp, nums_count, batch_size, decoded_indices, decoded_values, decoded_shape, nums_count_pre_sum); cudaFree(nums_count_pre_sum); return sum_num_count; } template CUDA_LIB_EXPORT void CalCTCGreedyDecoder<float>(const float *input, const int bound, const size_t outer_size, const size_t batch_size, int64_t *decoded_values_temp, float *log_probability, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalCTCGreedyDecoder<double>(const double *input, const int bound, const size_t outer_size, const size_t batch_size, int64_t *decoded_values_temp, double *log_probability, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Calmerge<float>(int64_t *decoded_values_temp, const int32_t *sequence_length, const size_t batch_size, const int bound, const bool merge_ok, float *log_probability, int64_t *nums_count, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void Calmerge<double>(int64_t *decoded_values_temp, const int32_t *sequence_length, const size_t batch_size, const int bound, const bool merge_ok, double *log_probability, int64_t *nums_count, const uint32_t &device_id, cudaStream_t cuda_stream);
d01d7ddf6bdc231340f9645ab9810defaccd8e06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <bits/stdc++.h> #include <fstream> #include <sstream> #include <string> #include "math.h" #include "limits.h" #define MIN -99 #define M 104 #define N 1500 #define trainFileName "train_full.csv" #define testFileName "test_full.csv" #define features 55 #define K 10 #define trainData(row,col) trainData[col+row*M] #define testData(row,col) testData[col+row*M] #define THRESHOLD 0 using namespace std; vector <vector <double> > train_file; vector <vector <double> > test_file; vector <vector <double> > trainFile_Full; vector <vector <double> > testFile_Full; int *device_trainFileData, *device_cardinality; float *infoGainsInitializer; __device__ float device_infoGainOfData; dim3 blocks(M); dim3 threads(N); struct Node{ int number_of_children; int branch_value; int split_attribute; int leaf_value; struct Node *children[10]; }; typedef struct Node node; node* create(){ node* n = new node; n->number_of_children = 0; n->branch_value = -1; n->split_attribute = -1; n->leaf_value = -1; return n; } void chooseRandomFeatures(){ vector<vector<double> > trainFileRandom( N , vector<double> (M, 0)); for(int i=0; i<50; i++){ int guess = rand() % 103; trainFileRandom[i]=train_file[guess]; } train_file=trainFileRandom; } double cosine_distance(double *A, double *B, int Vector_Length) { double dot = 0.0, denominator_a = 0.0, denominator_b = 0.0 ; for( int i = 0u; i < Vector_Length; ++i) { dot += A[i] * B[i] ; denominator_a += A[i] * A[i] ; denominator_b += B[i] * B[i] ; } return dot / (sqrt( denominator_a) * sqrt( denominator_b)) ; } void k_means(){ double trainFile1[N][M]; int minima[features]={INT_MAX}; int maxima[features]={INT_MIN}; int cluster[N]; int t=0; for(int i=0; i<N; i++){ for(int j=0; j<features; j++){ if( trainFile1[i][j]<minima[j]){ minima[j]= trainFile1[i][j]; } if( trainFile1[i][j]>maxima[j]){ maxima[j]= trainFile1[i][j]; } } } double mean_arr[K][features]; 
for(int i=0; i<K; i++){ for(int j=0; j<features; j++){ int num = (rand() % (maxima[j] - minima[j] + 1)) + minima[j]; mean_arr[i][j]=num; } } for (int i = 0; i < t; i++) { for (int j = 0; j < N; j++) { double* dists = new double[K]; for (int p = 0; p < K; p++) { dists[p] = cosine_distance( trainFile1[j], mean_arr[p], M); } cluster[j] = std::min_element(dists, dists + K) - dists; delete[] dists; } double sum[K][M]={0}; int count[K]={0}; for (int f = 0; f < N; f++) { for (int p = 0; p < M; p++) { sum[cluster[f]][p]+= trainFile1[f][p]; } count[cluster[f]]++; } for (int f = 0; f < K; f++) { for (int p = 0; p < M; p++) { mean_arr[f][p]=sum[f][p]/count[f]; } } } } void read_files(string file_name){ if(file_name.compare("training")==0){ string line; ifstream ifs(trainFileName); while(getline(ifs,line)){ vector <double> entry; stringstream lineStream(line); string value; while(getline(lineStream,value,',')){ entry.push_back(stof(value)); } train_file.push_back(entry); } ifs.close(); } else if(file_name.compare("testing")==0){ string line1; ifstream ifs1(testFileName); while(getline(ifs1,line1)){ vector <double> entry; stringstream lineStream1(line1); string value; while(getline(lineStream1,value,',')){ entry.push_back(stof(value)); } test_file.push_back(entry); } ifs1.close(); } } __global__ void getInformationGains(int *attr,int *data,int dataSize,float *infoGains,int *trainData,int *cardinality) { if(attr[blockIdx.x]==0 && blockIdx.x!=0 && blockIdx.x!=M-1){ int threadid,blockid,j; threadid=threadIdx.x; blockid=blockIdx.x; __shared__ int value_attribute[10]; __shared__ int value_class_attribute[10][10]; if(threadid<10){ value_attribute[threadid]=0; for(j=0;j<10;j++){ value_class_attribute[threadid][j]=0; } } __syncthreads(); int classVal = trainData(data[threadid],M-1); int attribute_value = trainData(data[threadid],blockid); atomicAdd(&value_attribute[attribute_value],1); atomicAdd(&value_class_attribute[attribute_value][classVal],1); __syncthreads(); if(threadid==0){ int 
i,j; float information_gain,intermediateGain; information_gain=0; for(i=1;i<=cardinality[blockid];i++){ intermediateGain=0; if(value_attribute[i]==0){ continue; } for(j=1;j<=cardinality[M-1];j++){ if(value_class_attribute[i][j]==0){ continue; } intermediateGain+=(float(value_class_attribute[i][j])/(float)value_attribute[i])*(log((float)value_class_attribute[i][j]/(float)value_attribute[i])/log((float)2)); } intermediateGain*=(float(value_attribute[i])/(float)dataSize); information_gain-=intermediateGain; } infoGains[blockid]=information_gain; } } } __global__ void getInfoGainOfData(int *data,int dataSize,int *trainData,int *cardinality) { __shared__ int value_class_count[10]; int classVal,i,threadid; float information_gain; threadid=threadIdx.x; if(threadid<10){ value_class_count[threadid]=0; } __syncthreads(); classVal=trainData(data[threadIdx.x],M-1); atomicAdd(&value_class_count[classVal],1); __syncthreads(); if(threadid==0){ information_gain=0; for(i=1;i<=cardinality[M-1];i++){ if(value_class_count[i]==0){ continue; } information_gain+=((float)value_class_count[i]/(float)dataSize)*(log((float)value_class_count[i]/(float)dataSize)/log((float)2)); } device_infoGainOfData=-1*information_gain; } } int majority_vote(int *data,int dataSize) { int i,outputClass,ans,maxVal; map <int, int> dataCount; map <int, int>::iterator iterator; for(i=0;i<dataSize;i++){ outputClass = train_file[data[i]][M-1]; if(dataCount.find(outputClass)==dataCount.end()){ dataCount.insert(make_pair(outputClass,1)); } else{ dataCount[outputClass]++; } } maxVal = MIN; for(iterator=dataCount.begin();iterator!=dataCount.end();iterator++){ if(iterator->second > maxVal){ ans = iterator->first; } } return ans; } void make_decision(int *host_attributes, int *host_data, node *root, int host_datasize) { int flag, host_selectedAttribute, i; k_means(); if(host_datasize<=THRESHOLD){ return; } float maxGain; flag=1; for(i=1;i<host_datasize;i++){ 
if(train_file[host_data[i]][M-1]!=train_file[host_data[i-1]][M-1]){ flag=0; break; } } if(flag==1){ root->leaf_value=train_file[host_data[0]][M-1]; return; } int *device_attr, *device_data; float *device_infoGains; float host_informationGains[M]; float host_infoGainOfData; hipMalloc((void**)&device_attr,M*sizeof(int)); hipMalloc((void**)&device_data,host_datasize*sizeof(int)); hipMalloc(&device_infoGains,M*sizeof(float)); hipMemcpy((void*)device_attr,(void*)host_attributes,M*sizeof(int),hipMemcpyHostToDevice); hipMemcpy((void*)device_data,(void*)host_data,host_datasize*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(device_infoGains, infoGainsInitializer, M*sizeof(float),hipMemcpyHostToDevice); hipLaunchKernelGGL(( getInformationGains), dim3(blocks),dim3(host_datasize), 0, 0, device_attr,device_data,host_datasize,device_infoGains,device_trainFileData,device_cardinality); hipMemcpy((void*)host_informationGains,(void*)device_infoGains,M*sizeof(float),hipMemcpyDeviceToHost); hipFree(device_attr); hipFree(device_infoGains); hipLaunchKernelGGL(( getInfoGainOfData), dim3(1),dim3(host_datasize), 0, 0, device_data,host_datasize,device_trainFileData,device_cardinality); hipMemcpyFromSymbol(&host_infoGainOfData,device_infoGainOfData,sizeof(float),0,hipMemcpyDeviceToHost); hipFree(device_data); maxGain=MIN; host_selectedAttribute=-1; for(i=1;i<M-1;i++){ if(host_attributes[i]==0){ host_informationGains[i]=host_infoGainOfData-host_informationGains[i]; if(host_informationGains[i]>maxGain){ maxGain=host_informationGains[i]; host_selectedAttribute=i; } } } root->split_attribute = host_selectedAttribute; host_attributes[host_selectedAttribute]=1; if(host_selectedAttribute==-1){ root->leaf_value = majority_vote(host_data, host_datasize); return; } map<int, vector <int> > dividedData; map<int, vector <int> >::iterator iterator; int attribute_value; for(i=0;i<host_datasize;i++){ attribute_value = train_file[host_data[i]][host_selectedAttribute]; if(dividedData.find(attribute_value) == 
dividedData.end()){ vector <int> x; x.push_back(host_data[i]); dividedData.insert(make_pair(attribute_value,x)); } else{ dividedData[attribute_value].push_back(host_data[i]); } } for(i=0,iterator=dividedData.begin(); iterator!=dividedData.end(); iterator++,i++){ root->number_of_children++; node* childNode; childNode = create(); root->children[i] = childNode; childNode->branch_value = iterator->first; int new_attr[M]; for(int z=0;z<M;z++){ new_attr[z]=host_attributes[z]; } int* host_childData = &(iterator->second[0]); make_decision(new_attr, host_childData, childNode, iterator->second.size()); } } __global__ void getCardinality(int *trainData, int *cardinality) { __shared__ int x[10]; int blockid, threadid,i; blockid = blockIdx.x; threadid = threadIdx.x; if(threadid<10){ x[threadid]=0; } __syncthreads(); if(blockIdx.x!=0){ x[trainData(threadid, blockid)] = 1; __syncthreads(); for(i=1;i<10;i*=2){ int index = 2*i*threadid; if(index+i<10){ x[index]+=x[index+i]; } __syncthreads(); } if(threadid==0){ cardinality[blockid]=x[0]; } } __syncthreads(); } void fillTrainFile(vector <vector <double> > trainFile_Full, int index){ for(int j=0; j<trainFile_Full.size(); j++){ vector<double> temp; for(int i=0; i<(M-1); i++){ temp.push_back(trainFile_Full[j][i]); } temp.push_back(trainFile_Full[j][index]); train_file.push_back(temp); } } void fillTestFile(vector <vector <double> > testFile_Full, int index){ for(int j=0; j<testFile_Full.size(); j++){ vector<double> temp; for(int i=0; i<(M-1); i++){ temp.push_back(testFile_Full[j][i]); } temp.push_back(testFile_Full[j][index]); test_file.push_back(temp); } } void test(node* root, int index) { int i,pos,neg,noResult,attr,attribute_value,j,flag; node* temp; pos=0; neg=0; noResult=0; // readCSV("testing"); // read_files("testing"); fillTestFile(testFile_Full, index); for(i=0;i<test_file.size();i++){ temp=root; flag=0; while(temp->leaf_value==-1 && temp->split_attribute!=-1){ attr = temp->split_attribute; attribute_value=test_file[i][attr]; 
for(j=0;j<temp->number_of_children;j++){ if(temp->children[j]->branch_value == attribute_value){ break; } } if(j==temp->number_of_children){ flag=1; break; } else{ temp=temp->children[j]; } } if(temp->leaf_value == test_file[i][M-1]){ pos++; } else{ neg++; } if(temp->leaf_value == -1 || flag==1){ noResult++; } } cout << "Class" << (index - 102) << " : "; cout << "Accuracy: " << max(pos, neg)/(pos+neg+0.0)*1.0; return; } void extractFull(string str) { if(str.compare("training")==0){ ifstream ifs(trainFileName); string line; while(getline(ifs,line)){ stringstream lineStream(line); string cell; vector <double> values; while(getline(lineStream,cell,',')){ values.push_back(stof(cell)); } trainFile_Full.push_back(values); } ifs.close(); } else if(str.compare("testing")==0){ ifstream ifs1(testFileName); string line1; while(getline(ifs1,line1)){ stringstream lineStream1(line1); string cell1; vector <double> values1; while(getline(lineStream1,cell1,',')){ values1.push_back(stof(cell1)); } testFile_Full.push_back(values1); } ifs1.close(); } } int main() { int i; node* root; extractFull("training"); extractFull("testing"); // readCSV("training"); // read_files("training"); for(int index=103; index<117; index++){ train_file.clear(); test_file.clear(); fillTrainFile(trainFile_Full, index); // chooseRandom(); int host_trainFileData[N*M+5]={0}; for(i=0;i<N*M;i++){ host_trainFileData[i] = train_file[i/M][i%M]; } int host_data[N], host_attributes[M]; for(i=0;i<N;i++){ host_data[i]=i; } for(i=0;i<M;i++){ host_attributes[i]=0; } hipMalloc((void**)&device_trainFileData, N*M*sizeof(int)); hipMemcpy((void*)device_trainFileData,(void*)host_trainFileData, M*N*sizeof(int), hipMemcpyHostToDevice); hipMalloc((void**)&device_cardinality,M*sizeof(int)); hipMemset(device_cardinality, 0, M*sizeof(int)); hipLaunchKernelGGL(( getCardinality), dim3(blocks),dim3(threads), 0, 0, device_trainFileData, device_cardinality); root = create(); infoGainsInitializer = (float*)malloc( M * sizeof(float)); 
for(i=0; i<M; i++){ infoGainsInitializer[i]=MIN; } make_decision(host_attributes, host_data, root, N); hipFree(device_trainFileData); hipFree(device_cardinality); test(root, index); cout << endl; } return 0; }
d01d7ddf6bdc231340f9645ab9810defaccd8e06.cu
#include <iostream> #include <bits/stdc++.h> #include <fstream> #include <sstream> #include <string> #include "math.h" #include "limits.h" #define MIN -99 #define M 104 #define N 1500 #define trainFileName "train_full.csv" #define testFileName "test_full.csv" #define features 55 #define K 10 #define trainData(row,col) trainData[col+row*M] #define testData(row,col) testData[col+row*M] #define THRESHOLD 0 using namespace std; vector <vector <double> > train_file; vector <vector <double> > test_file; vector <vector <double> > trainFile_Full; vector <vector <double> > testFile_Full; int *device_trainFileData, *device_cardinality; float *infoGainsInitializer; __device__ float device_infoGainOfData; dim3 blocks(M); dim3 threads(N); struct Node{ int number_of_children; int branch_value; int split_attribute; int leaf_value; struct Node *children[10]; }; typedef struct Node node; node* create(){ node* n = new node; n->number_of_children = 0; n->branch_value = -1; n->split_attribute = -1; n->leaf_value = -1; return n; } void chooseRandomFeatures(){ vector<vector<double> > trainFileRandom( N , vector<double> (M, 0)); for(int i=0; i<50; i++){ int guess = rand() % 103; trainFileRandom[i]=train_file[guess]; } train_file=trainFileRandom; } double cosine_distance(double *A, double *B, int Vector_Length) { double dot = 0.0, denominator_a = 0.0, denominator_b = 0.0 ; for( int i = 0u; i < Vector_Length; ++i) { dot += A[i] * B[i] ; denominator_a += A[i] * A[i] ; denominator_b += B[i] * B[i] ; } return dot / (sqrt( denominator_a) * sqrt( denominator_b)) ; } void k_means(){ double trainFile1[N][M]; int minima[features]={INT_MAX}; int maxima[features]={INT_MIN}; int cluster[N]; int t=0; for(int i=0; i<N; i++){ for(int j=0; j<features; j++){ if( trainFile1[i][j]<minima[j]){ minima[j]= trainFile1[i][j]; } if( trainFile1[i][j]>maxima[j]){ maxima[j]= trainFile1[i][j]; } } } double mean_arr[K][features]; for(int i=0; i<K; i++){ for(int j=0; j<features; j++){ int num = (rand() % (maxima[j] - 
minima[j] + 1)) + minima[j]; mean_arr[i][j]=num; } } for (int i = 0; i < t; i++) { for (int j = 0; j < N; j++) { double* dists = new double[K]; for (int p = 0; p < K; p++) { dists[p] = cosine_distance( trainFile1[j], mean_arr[p], M); } cluster[j] = std::min_element(dists, dists + K) - dists; delete[] dists; } double sum[K][M]={0}; int count[K]={0}; for (int f = 0; f < N; f++) { for (int p = 0; p < M; p++) { sum[cluster[f]][p]+= trainFile1[f][p]; } count[cluster[f]]++; } for (int f = 0; f < K; f++) { for (int p = 0; p < M; p++) { mean_arr[f][p]=sum[f][p]/count[f]; } } } } void read_files(string file_name){ if(file_name.compare("training")==0){ string line; ifstream ifs(trainFileName); while(getline(ifs,line)){ vector <double> entry; stringstream lineStream(line); string value; while(getline(lineStream,value,',')){ entry.push_back(stof(value)); } train_file.push_back(entry); } ifs.close(); } else if(file_name.compare("testing")==0){ string line1; ifstream ifs1(testFileName); while(getline(ifs1,line1)){ vector <double> entry; stringstream lineStream1(line1); string value; while(getline(lineStream1,value,',')){ entry.push_back(stof(value)); } test_file.push_back(entry); } ifs1.close(); } } __global__ void getInformationGains(int *attr,int *data,int dataSize,float *infoGains,int *trainData,int *cardinality) { if(attr[blockIdx.x]==0 && blockIdx.x!=0 && blockIdx.x!=M-1){ int threadid,blockid,j; threadid=threadIdx.x; blockid=blockIdx.x; __shared__ int value_attribute[10]; __shared__ int value_class_attribute[10][10]; if(threadid<10){ value_attribute[threadid]=0; for(j=0;j<10;j++){ value_class_attribute[threadid][j]=0; } } __syncthreads(); int classVal = trainData(data[threadid],M-1); int attribute_value = trainData(data[threadid],blockid); atomicAdd(&value_attribute[attribute_value],1); atomicAdd(&value_class_attribute[attribute_value][classVal],1); __syncthreads(); if(threadid==0){ int i,j; float information_gain,intermediateGain; information_gain=0; 
for(i=1;i<=cardinality[blockid];i++){ intermediateGain=0; if(value_attribute[i]==0){ continue; } for(j=1;j<=cardinality[M-1];j++){ if(value_class_attribute[i][j]==0){ continue; } intermediateGain+=(float(value_class_attribute[i][j])/(float)value_attribute[i])*(log((float)value_class_attribute[i][j]/(float)value_attribute[i])/log((float)2)); } intermediateGain*=(float(value_attribute[i])/(float)dataSize); information_gain-=intermediateGain; } infoGains[blockid]=information_gain; } } } __global__ void getInfoGainOfData(int *data,int dataSize,int *trainData,int *cardinality) { __shared__ int value_class_count[10]; int classVal,i,threadid; float information_gain; threadid=threadIdx.x; if(threadid<10){ value_class_count[threadid]=0; } __syncthreads(); classVal=trainData(data[threadIdx.x],M-1); atomicAdd(&value_class_count[classVal],1); __syncthreads(); if(threadid==0){ information_gain=0; for(i=1;i<=cardinality[M-1];i++){ if(value_class_count[i]==0){ continue; } information_gain+=((float)value_class_count[i]/(float)dataSize)*(log((float)value_class_count[i]/(float)dataSize)/log((float)2)); } device_infoGainOfData=-1*information_gain; } } int majority_vote(int *data,int dataSize) { int i,outputClass,ans,maxVal; map <int, int> dataCount; map <int, int>::iterator iterator; for(i=0;i<dataSize;i++){ outputClass = train_file[data[i]][M-1]; if(dataCount.find(outputClass)==dataCount.end()){ dataCount.insert(make_pair(outputClass,1)); } else{ dataCount[outputClass]++; } } maxVal = MIN; for(iterator=dataCount.begin();iterator!=dataCount.end();iterator++){ if(iterator->second > maxVal){ ans = iterator->first; } } return ans; } void make_decision(int *host_attributes, int *host_data, node *root, int host_datasize) { int flag, host_selectedAttribute, i; k_means(); if(host_datasize<=THRESHOLD){ return; } float maxGain; flag=1; for(i=1;i<host_datasize;i++){ if(train_file[host_data[i]][M-1]!=train_file[host_data[i-1]][M-1]){ flag=0; break; } } if(flag==1){ 
root->leaf_value=train_file[host_data[0]][M-1]; return; } int *device_attr, *device_data; float *device_infoGains; float host_informationGains[M]; float host_infoGainOfData; cudaMalloc((void**)&device_attr,M*sizeof(int)); cudaMalloc((void**)&device_data,host_datasize*sizeof(int)); cudaMalloc(&device_infoGains,M*sizeof(float)); cudaMemcpy((void*)device_attr,(void*)host_attributes,M*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy((void*)device_data,(void*)host_data,host_datasize*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(device_infoGains, infoGainsInitializer, M*sizeof(float),cudaMemcpyHostToDevice); getInformationGains<<<blocks,host_datasize>>>(device_attr,device_data,host_datasize,device_infoGains,device_trainFileData,device_cardinality); cudaMemcpy((void*)host_informationGains,(void*)device_infoGains,M*sizeof(float),cudaMemcpyDeviceToHost); cudaFree(device_attr); cudaFree(device_infoGains); getInfoGainOfData<<<1,host_datasize>>>(device_data,host_datasize,device_trainFileData,device_cardinality); cudaMemcpyFromSymbol(&host_infoGainOfData,device_infoGainOfData,sizeof(float),0,cudaMemcpyDeviceToHost); cudaFree(device_data); maxGain=MIN; host_selectedAttribute=-1; for(i=1;i<M-1;i++){ if(host_attributes[i]==0){ host_informationGains[i]=host_infoGainOfData-host_informationGains[i]; if(host_informationGains[i]>maxGain){ maxGain=host_informationGains[i]; host_selectedAttribute=i; } } } root->split_attribute = host_selectedAttribute; host_attributes[host_selectedAttribute]=1; if(host_selectedAttribute==-1){ root->leaf_value = majority_vote(host_data, host_datasize); return; } map<int, vector <int> > dividedData; map<int, vector <int> >::iterator iterator; int attribute_value; for(i=0;i<host_datasize;i++){ attribute_value = train_file[host_data[i]][host_selectedAttribute]; if(dividedData.find(attribute_value) == dividedData.end()){ vector <int> x; x.push_back(host_data[i]); dividedData.insert(make_pair(attribute_value,x)); } else{ 
dividedData[attribute_value].push_back(host_data[i]); } } for(i=0,iterator=dividedData.begin(); iterator!=dividedData.end(); iterator++,i++){ root->number_of_children++; node* childNode; childNode = create(); root->children[i] = childNode; childNode->branch_value = iterator->first; int new_attr[M]; for(int z=0;z<M;z++){ new_attr[z]=host_attributes[z]; } int* host_childData = &(iterator->second[0]); make_decision(new_attr, host_childData, childNode, iterator->second.size()); } } __global__ void getCardinality(int *trainData, int *cardinality) { __shared__ int x[10]; int blockid, threadid,i; blockid = blockIdx.x; threadid = threadIdx.x; if(threadid<10){ x[threadid]=0; } __syncthreads(); if(blockIdx.x!=0){ x[trainData(threadid, blockid)] = 1; __syncthreads(); for(i=1;i<10;i*=2){ int index = 2*i*threadid; if(index+i<10){ x[index]+=x[index+i]; } __syncthreads(); } if(threadid==0){ cardinality[blockid]=x[0]; } } __syncthreads(); } void fillTrainFile(vector <vector <double> > trainFile_Full, int index){ for(int j=0; j<trainFile_Full.size(); j++){ vector<double> temp; for(int i=0; i<(M-1); i++){ temp.push_back(trainFile_Full[j][i]); } temp.push_back(trainFile_Full[j][index]); train_file.push_back(temp); } } void fillTestFile(vector <vector <double> > testFile_Full, int index){ for(int j=0; j<testFile_Full.size(); j++){ vector<double> temp; for(int i=0; i<(M-1); i++){ temp.push_back(testFile_Full[j][i]); } temp.push_back(testFile_Full[j][index]); test_file.push_back(temp); } } void test(node* root, int index) { int i,pos,neg,noResult,attr,attribute_value,j,flag; node* temp; pos=0; neg=0; noResult=0; // readCSV("testing"); // read_files("testing"); fillTestFile(testFile_Full, index); for(i=0;i<test_file.size();i++){ temp=root; flag=0; while(temp->leaf_value==-1 && temp->split_attribute!=-1){ attr = temp->split_attribute; attribute_value=test_file[i][attr]; for(j=0;j<temp->number_of_children;j++){ if(temp->children[j]->branch_value == attribute_value){ break; } } 
if(j==temp->number_of_children){ flag=1; break; } else{ temp=temp->children[j]; } } if(temp->leaf_value == test_file[i][M-1]){ pos++; } else{ neg++; } if(temp->leaf_value == -1 || flag==1){ noResult++; } } cout << "Class" << (index - 102) << " : "; cout << "Accuracy: " << max(pos, neg)/(pos+neg+0.0)*1.0; return; } void extractFull(string str) { if(str.compare("training")==0){ ifstream ifs(trainFileName); string line; while(getline(ifs,line)){ stringstream lineStream(line); string cell; vector <double> values; while(getline(lineStream,cell,',')){ values.push_back(stof(cell)); } trainFile_Full.push_back(values); } ifs.close(); } else if(str.compare("testing")==0){ ifstream ifs1(testFileName); string line1; while(getline(ifs1,line1)){ stringstream lineStream1(line1); string cell1; vector <double> values1; while(getline(lineStream1,cell1,',')){ values1.push_back(stof(cell1)); } testFile_Full.push_back(values1); } ifs1.close(); } } int main() { int i; node* root; extractFull("training"); extractFull("testing"); // readCSV("training"); // read_files("training"); for(int index=103; index<117; index++){ train_file.clear(); test_file.clear(); fillTrainFile(trainFile_Full, index); // chooseRandom(); int host_trainFileData[N*M+5]={0}; for(i=0;i<N*M;i++){ host_trainFileData[i] = train_file[i/M][i%M]; } int host_data[N], host_attributes[M]; for(i=0;i<N;i++){ host_data[i]=i; } for(i=0;i<M;i++){ host_attributes[i]=0; } cudaMalloc((void**)&device_trainFileData, N*M*sizeof(int)); cudaMemcpy((void*)device_trainFileData,(void*)host_trainFileData, M*N*sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&device_cardinality,M*sizeof(int)); cudaMemset(device_cardinality, 0, M*sizeof(int)); getCardinality<<<blocks,threads>>>(device_trainFileData, device_cardinality); root = create(); infoGainsInitializer = (float*)malloc( M * sizeof(float)); for(i=0; i<M; i++){ infoGainsInitializer[i]=MIN; } make_decision(host_attributes, host_data, root, N); cudaFree(device_trainFileData); 
cudaFree(device_cardinality); test(root, index); cout << endl; } return 0; }
624b87664b7a15e846f056e3816f61bc845334d7.hip
// !!! This is a file automatically generated by hipify!!! #include <cfloat> #include <rocblas.h> #include <cusolverDn.h> #include "struct.h" #include "constants.h" void tauInitial(double *, double *, fcndata &); void vertexToEdge(double *, double *, int *, int, int); void vertexToBottom(double *, double *, int *, int, int); void computeFrame(double *, double *, double *, fcndata &); void assembleFemMatrix(double *, double *, double *, double *, double *, fcndata &); void yankTauActivity(double *, double *, fcndata &); void reactionTauActivity(double *, double *, fcndata &); void computeKernel(double *, double *, fcndata &); void addEpsIdentity(double *, double, int); void cholesky(double *, fcndata &); void computeExternalYank(double *, double *, double *, double *, fcndata &); int ipcg(double *, double *, fcndata &); void vectoraxpby(double *, double, double *, double, double *, int); void assembleFemVector(double *, double *, double *, fcndata &); void deform(double *h_objPtr, double *h_posVec, fcndata &fcnObj) { int lmkNum = fcnObj.prm.lmkNum; int elmNum = fcnObj.prm.elmNum; int btmElmNum = fcnObj.prm.btmElmNum; int timeNum = fcnObj.prm.timeNum; double timeStp = fcnObj.prm.timeStp; hipMemcpy(fcnObj.d_lmkStk, fcnObj.prm.d_lmkIniMat, sizeof(double) * lmkNum * DIMNUM, hipMemcpyDeviceToDevice); hipMemset(fcnObj.d_vlcStk, 0, sizeof(double) * lmkNum * DIMNUM); hipMemset(fcnObj.d_pnlMat, 0, sizeof(double) * lmkNum * DIMNUM); tauInitial(fcnObj.d_tauMat, h_posVec, fcnObj); vertexToEdge(fcnObj.d_lmkNowEdgMat, fcnObj.prm.d_lmkIniMat, fcnObj.elm.d_elmVtxMat, lmkNum, elmNum); computeFrame(fcnObj.d_tanNowMat, fcnObj.d_tsvNowMat, fcnObj.prm.d_lmkIniMat, fcnObj); assembleFemMatrix(fcnObj.d_ppdNowMat, fcnObj.d_ggdNowMat, fcnObj.d_lmkNowEdgMat, fcnObj.d_tanNowMat, fcnObj.d_tsvNowMat, fcnObj); for ( int timeIdx = 0; timeIdx < timeNum - 1; ++timeIdx ) { fcnObj.d_lmkNowMat = fcnObj.d_lmkStk + timeIdx * lmkNum * DIMNUM; fcnObj.d_lmkNxtMat = fcnObj.d_lmkStk + (timeIdx + 1) * lmkNum * 
DIMNUM; fcnObj.d_tauNowVec = fcnObj.d_tauMat + timeIdx * lmkNum; fcnObj.d_tauNxtVec = fcnObj.d_tauMat + (timeIdx + 1) * lmkNum; fcnObj.d_vlcMat = fcnObj.d_vlcStk + timeIdx * lmkNum * DIMNUM; vertexToBottom(fcnObj.d_lmkNowBtmMat, fcnObj.d_lmkNowMat, fcnObj.elm.d_btmVtxMat, lmkNum, btmElmNum); yankTauActivity(fcnObj.d_ynkActFcnNowVec, fcnObj.d_tauNowVec, fcnObj); reactionTauActivity(fcnObj.d_reaActFcnNowVec, fcnObj.d_tauNowVec, fcnObj); computeKernel(fcnObj.d_knlMat, fcnObj.d_lmkNowMat, fcnObj); addEpsIdentity(fcnObj.d_knlMat, fcnObj.prm.knlEps, lmkNum); hipMemcpy(fcnObj.d_knLMat, fcnObj.d_knlMat, sizeof(double) * lmkNum * lmkNum, hipMemcpyDeviceToDevice); cholesky(fcnObj.d_knLMat, fcnObj); computeExternalYank(fcnObj.d_exYMat, fcnObj.d_lmkNowMat, fcnObj.d_lmkNowEdgMat, fcnObj.d_ynkActFcnNowVec, fcnObj); int cgStatus = ipcg(fcnObj.d_vlcMat, fcnObj.d_exYMat, fcnObj); if ( cgStatus != 0 ) { *h_objPtr = DBL_MAX; return; } if ( timeIdx < timeNum - 2 ) { hipMemcpy(fcnObj.d_vlcMat + lmkNum * DIMNUM, fcnObj.d_vlcMat, sizeof(double) * lmkNum * DIMNUM, hipMemcpyDeviceToDevice); } vectoraxpby(fcnObj.d_lmkNxtMat, 1.0, fcnObj.d_lmkNowMat, timeStp, fcnObj.d_vlcMat, lmkNum * DIMNUM); vertexToEdge(fcnObj.d_lmkNxtEdgMat, fcnObj.d_lmkNxtMat, fcnObj.elm.d_elmVtxMat, lmkNum, elmNum); computeFrame(fcnObj.d_tanNxtMat, fcnObj.d_tsvNxtMat, fcnObj.d_lmkNxtMat, fcnObj); assembleFemMatrix(fcnObj.d_ppdNxtMat, fcnObj.d_ggdNxtMat, fcnObj.d_lmkNxtEdgMat, fcnObj.d_tanNxtMat, fcnObj.d_tsvNxtMat, fcnObj); assembleFemVector(fcnObj.d_femRpdVec, fcnObj.d_lmkNowEdgMat, fcnObj.d_reaActFcnNowVec, fcnObj); vectoraxpby(fcnObj.d_femLftMat, 1.0, fcnObj.d_ppdNxtMat, timeStp, fcnObj.d_ggdNxtMat, lmkNum * lmkNum); hipMemcpy(fcnObj.d_femPpdMat, fcnObj.d_ppdNowMat, sizeof(double) * lmkNum * lmkNum, hipMemcpyDeviceToDevice); double oneVal = 1.0; hipblasDgemv(fcnObj.blasHdl, HIPBLAS_OP_N, lmkNum, lmkNum, &oneVal, fcnObj.d_femPpdMat, lmkNum, fcnObj.d_tauNowVec, 1, &timeStp, fcnObj.d_femRpdVec, 1); 
hipsolverDnDpotrf(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER, lmkNum, fcnObj.d_femLftMat, lmkNum, fcnObj.d_workspace, fcnObj.h_Lwork, fcnObj.d_status); hipMemcpy(fcnObj.d_tauNxtVec, fcnObj.d_femRpdVec, sizeof(double) * lmkNum, hipMemcpyDeviceToDevice); hipsolverDnDpotrs(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER, lmkNum, DIMNUM, fcnObj.d_femLftMat, lmkNum, fcnObj.d_tauNxtVec, lmkNum, fcnObj.d_status); hipMemcpy(fcnObj.d_lmkNowEdgMat, fcnObj.d_lmkNxtEdgMat, sizeof(double) * elmNum * DIMNUM * (VTXNUM - 1), hipMemcpyDeviceToDevice); hipMemcpy(fcnObj.d_tanNowMat, fcnObj.d_tanNxtMat, sizeof(double) * elmNum * DIMNUM, hipMemcpyDeviceToDevice); hipMemcpy(fcnObj.d_tsvNowMat, fcnObj.d_tsvNxtMat, sizeof(double) * elmNum * DIMNUM, hipMemcpyDeviceToDevice); hipMemcpy(fcnObj.d_ppdNowMat, fcnObj.d_ppdNxtMat, sizeof(double) * lmkNum * lmkNum, hipMemcpyDeviceToDevice); hipMemcpy(fcnObj.d_ggdNowMat, fcnObj.d_ggdNxtMat, sizeof(double) * lmkNum * lmkNum, hipMemcpyDeviceToDevice); } return; }
624b87664b7a15e846f056e3816f61bc845334d7.cu
#include <cfloat> #include <cublas_v2.h> #include <cusolverDn.h> #include "struct.h" #include "constants.h" void tauInitial(double *, double *, fcndata &); void vertexToEdge(double *, double *, int *, int, int); void vertexToBottom(double *, double *, int *, int, int); void computeFrame(double *, double *, double *, fcndata &); void assembleFemMatrix(double *, double *, double *, double *, double *, fcndata &); void yankTauActivity(double *, double *, fcndata &); void reactionTauActivity(double *, double *, fcndata &); void computeKernel(double *, double *, fcndata &); void addEpsIdentity(double *, double, int); void cholesky(double *, fcndata &); void computeExternalYank(double *, double *, double *, double *, fcndata &); int ipcg(double *, double *, fcndata &); void vectoraxpby(double *, double, double *, double, double *, int); void assembleFemVector(double *, double *, double *, fcndata &); void deform(double *h_objPtr, double *h_posVec, fcndata &fcnObj) { int lmkNum = fcnObj.prm.lmkNum; int elmNum = fcnObj.prm.elmNum; int btmElmNum = fcnObj.prm.btmElmNum; int timeNum = fcnObj.prm.timeNum; double timeStp = fcnObj.prm.timeStp; cudaMemcpy(fcnObj.d_lmkStk, fcnObj.prm.d_lmkIniMat, sizeof(double) * lmkNum * DIMNUM, cudaMemcpyDeviceToDevice); cudaMemset(fcnObj.d_vlcStk, 0, sizeof(double) * lmkNum * DIMNUM); cudaMemset(fcnObj.d_pnlMat, 0, sizeof(double) * lmkNum * DIMNUM); tauInitial(fcnObj.d_tauMat, h_posVec, fcnObj); vertexToEdge(fcnObj.d_lmkNowEdgMat, fcnObj.prm.d_lmkIniMat, fcnObj.elm.d_elmVtxMat, lmkNum, elmNum); computeFrame(fcnObj.d_tanNowMat, fcnObj.d_tsvNowMat, fcnObj.prm.d_lmkIniMat, fcnObj); assembleFemMatrix(fcnObj.d_ppdNowMat, fcnObj.d_ggdNowMat, fcnObj.d_lmkNowEdgMat, fcnObj.d_tanNowMat, fcnObj.d_tsvNowMat, fcnObj); for ( int timeIdx = 0; timeIdx < timeNum - 1; ++timeIdx ) { fcnObj.d_lmkNowMat = fcnObj.d_lmkStk + timeIdx * lmkNum * DIMNUM; fcnObj.d_lmkNxtMat = fcnObj.d_lmkStk + (timeIdx + 1) * lmkNum * DIMNUM; fcnObj.d_tauNowVec = fcnObj.d_tauMat + 
timeIdx * lmkNum; fcnObj.d_tauNxtVec = fcnObj.d_tauMat + (timeIdx + 1) * lmkNum; fcnObj.d_vlcMat = fcnObj.d_vlcStk + timeIdx * lmkNum * DIMNUM; vertexToBottom(fcnObj.d_lmkNowBtmMat, fcnObj.d_lmkNowMat, fcnObj.elm.d_btmVtxMat, lmkNum, btmElmNum); yankTauActivity(fcnObj.d_ynkActFcnNowVec, fcnObj.d_tauNowVec, fcnObj); reactionTauActivity(fcnObj.d_reaActFcnNowVec, fcnObj.d_tauNowVec, fcnObj); computeKernel(fcnObj.d_knlMat, fcnObj.d_lmkNowMat, fcnObj); addEpsIdentity(fcnObj.d_knlMat, fcnObj.prm.knlEps, lmkNum); cudaMemcpy(fcnObj.d_knLMat, fcnObj.d_knlMat, sizeof(double) * lmkNum * lmkNum, cudaMemcpyDeviceToDevice); cholesky(fcnObj.d_knLMat, fcnObj); computeExternalYank(fcnObj.d_exYMat, fcnObj.d_lmkNowMat, fcnObj.d_lmkNowEdgMat, fcnObj.d_ynkActFcnNowVec, fcnObj); int cgStatus = ipcg(fcnObj.d_vlcMat, fcnObj.d_exYMat, fcnObj); if ( cgStatus != 0 ) { *h_objPtr = DBL_MAX; return; } if ( timeIdx < timeNum - 2 ) { cudaMemcpy(fcnObj.d_vlcMat + lmkNum * DIMNUM, fcnObj.d_vlcMat, sizeof(double) * lmkNum * DIMNUM, cudaMemcpyDeviceToDevice); } vectoraxpby(fcnObj.d_lmkNxtMat, 1.0, fcnObj.d_lmkNowMat, timeStp, fcnObj.d_vlcMat, lmkNum * DIMNUM); vertexToEdge(fcnObj.d_lmkNxtEdgMat, fcnObj.d_lmkNxtMat, fcnObj.elm.d_elmVtxMat, lmkNum, elmNum); computeFrame(fcnObj.d_tanNxtMat, fcnObj.d_tsvNxtMat, fcnObj.d_lmkNxtMat, fcnObj); assembleFemMatrix(fcnObj.d_ppdNxtMat, fcnObj.d_ggdNxtMat, fcnObj.d_lmkNxtEdgMat, fcnObj.d_tanNxtMat, fcnObj.d_tsvNxtMat, fcnObj); assembleFemVector(fcnObj.d_femRpdVec, fcnObj.d_lmkNowEdgMat, fcnObj.d_reaActFcnNowVec, fcnObj); vectoraxpby(fcnObj.d_femLftMat, 1.0, fcnObj.d_ppdNxtMat, timeStp, fcnObj.d_ggdNxtMat, lmkNum * lmkNum); cudaMemcpy(fcnObj.d_femPpdMat, fcnObj.d_ppdNowMat, sizeof(double) * lmkNum * lmkNum, cudaMemcpyDeviceToDevice); double oneVal = 1.0; cublasDgemv(fcnObj.blasHdl, CUBLAS_OP_N, lmkNum, lmkNum, &oneVal, fcnObj.d_femPpdMat, lmkNum, fcnObj.d_tauNowVec, 1, &timeStp, fcnObj.d_femRpdVec, 1); cusolverDnDpotrf(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER, 
lmkNum, fcnObj.d_femLftMat, lmkNum, fcnObj.d_workspace, fcnObj.h_Lwork, fcnObj.d_status); cudaMemcpy(fcnObj.d_tauNxtVec, fcnObj.d_femRpdVec, sizeof(double) * lmkNum, cudaMemcpyDeviceToDevice); cusolverDnDpotrs(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER, lmkNum, DIMNUM, fcnObj.d_femLftMat, lmkNum, fcnObj.d_tauNxtVec, lmkNum, fcnObj.d_status); cudaMemcpy(fcnObj.d_lmkNowEdgMat, fcnObj.d_lmkNxtEdgMat, sizeof(double) * elmNum * DIMNUM * (VTXNUM - 1), cudaMemcpyDeviceToDevice); cudaMemcpy(fcnObj.d_tanNowMat, fcnObj.d_tanNxtMat, sizeof(double) * elmNum * DIMNUM, cudaMemcpyDeviceToDevice); cudaMemcpy(fcnObj.d_tsvNowMat, fcnObj.d_tsvNxtMat, sizeof(double) * elmNum * DIMNUM, cudaMemcpyDeviceToDevice); cudaMemcpy(fcnObj.d_ppdNowMat, fcnObj.d_ppdNxtMat, sizeof(double) * lmkNum * lmkNum, cudaMemcpyDeviceToDevice); cudaMemcpy(fcnObj.d_ggdNowMat, fcnObj.d_ggdNxtMat, sizeof(double) * lmkNum * lmkNum, cudaMemcpyDeviceToDevice); } return; }
e95f9a210eb4141aa70392d462cd41ef5110c025.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "srad.h" #include <stdio.h> __global__ void srad_cuda_1( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, int cols, int rows, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float n, w, e, s, jc, g2, l, num, den, qsqr, c; //shared memory allocation __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float north[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float south[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float west[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory north[ty][tx] = J_cuda[index_n]; south[ty][tx] = J_cuda[index_s]; if ( by == 0 ){ north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx]; } else if ( by == gridDim.y - 1 ){ south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); if ( bx == 0 ){ west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty]; east[ty][tx] = J_cuda[index_e]; } else if ( bx == gridDim.x - 1 ){ west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } else { west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[index_e]; } temp[ty][tx] = J_cuda[index]; __syncthreads(); jc = temp[ty][tx]; if ( ty == 0 && tx == 0 ){ //nw n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - 
jc; } else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 ){ //n n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == BLOCK_SIZE -1 ){ //e n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1){ //s n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == 0 ){ //w n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else{ //the data elements which are not on the borders n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc); l = ( n + s + w + e ) / jc; num = (0.5*g2) - ((1.0/16.0)*(l*l)) ; den = 1 + (.25*l); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c = min(max(1.0f / (1.0f+den),0.0f),1.0f) ; // saturate diffusion coefficent temp_result[ty][tx] = c; __syncthreads(); C_cuda[index] = temp_result[ty][tx]; E_C[index] = e; W_C[index] = w; S_C[index] = s; N_C[index] = n; } __global__ void srad_cuda_2( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, int cols, int rows, float lambda, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * 
bx + cols * BLOCK_SIZE + tx; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float cc, cn, cs, ce, cw, d_sum; //shared memory allocation __shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory temp[ty][tx] = J_cuda[index]; __syncthreads(); south_c[ty][tx] = C_cuda[index_s]; if ( by == gridDim.y - 1 ){ south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); east_c[ty][tx] = C_cuda[index_e]; if ( bx == gridDim.x - 1 ){ east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); c_cuda_temp[ty][tx] = C_cuda[index]; __syncthreads(); cc = c_cuda_temp[ty][tx]; if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se cn = cc; cs = south_c[ty][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( tx == BLOCK_SIZE -1 ){ //e cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( ty == BLOCK_SIZE -1){ //s cn = cc; cs = south_c[ty][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } else{ //the data elements which are not on the borders cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } // divergence (equ 58) d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index]; // image update (equ 61) c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum; __syncthreads(); J_cuda[index] = c_cuda_result[ty][tx]; }
e95f9a210eb4141aa70392d462cd41ef5110c025.cu
#include "srad.h" #include <stdio.h> __global__ void srad_cuda_1( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, int cols, int rows, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE; float n, w, e, s, jc, g2, l, num, den, qsqr, c; //shared memory allocation __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float north[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float south[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float west[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory north[ty][tx] = J_cuda[index_n]; south[ty][tx] = J_cuda[index_s]; if ( by == 0 ){ north[ty][tx] = J_cuda[BLOCK_SIZE * bx + tx]; } else if ( by == gridDim.y - 1 ){ south[ty][tx] = J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); if ( bx == 0 ){ west[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + cols * ty]; east[ty][tx] = J_cuda[index_e]; } else if ( bx == gridDim.x - 1 ){ west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } else { west[ty][tx] = J_cuda[index_w]; east[ty][tx] = J_cuda[index_e]; } temp[ty][tx] = J_cuda[index]; __syncthreads(); jc = temp[ty][tx]; if ( ty == 0 && tx == 0 ){ //nw n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne n = north[ty][tx] - jc; s = 
temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else if ( ty == 0 ){ //n n = north[ty][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == BLOCK_SIZE -1 ){ //e n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = east[ty][tx] - jc; } else if ( ty == BLOCK_SIZE -1){ //s n = temp[ty-1][tx] - jc; s = south[ty][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } else if ( tx == 0 ){ //w n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = west[ty][tx] - jc; e = temp[ty][tx+1] - jc; } else{ //the data elements which are not on the borders n = temp[ty-1][tx] - jc; s = temp[ty+1][tx] - jc; w = temp[ty][tx-1] - jc; e = temp[ty][tx+1] - jc; } g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc); l = ( n + s + w + e ) / jc; num = (0.5*g2) - ((1.0/16.0)*(l*l)) ; den = 1 + (.25*l); qsqr = num/(den*den); // diffusion coefficent (equ 33) den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ; c = min(max(1.0f / (1.0f+den),0.0f),1.0f) ; // saturate diffusion coefficent temp_result[ty][tx] = c; __syncthreads(); C_cuda[index] = temp_result[ty][tx]; E_C[index] = e; W_C[index] = w; S_C[index] = s; N_C[index] = n; } __global__ void srad_cuda_2( float *E_C, float *W_C, float *N_C, float *S_C, float * J_cuda, float * C_cuda, int cols, int rows, float lambda, float q0sqr ) { //block id int bx = blockIdx.x; int by = blockIdx.y; //thread id int tx = threadIdx.x; int ty = threadIdx.y; //indices int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx; int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx; int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * 
bx + cols * ty + BLOCK_SIZE; float cc, cn, cs, ce, cw, d_sum; //shared memory allocation __shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float temp[BLOCK_SIZE][BLOCK_SIZE]; //load data to shared memory temp[ty][tx] = J_cuda[index]; __syncthreads(); south_c[ty][tx] = C_cuda[index_s]; if ( by == gridDim.y - 1 ){ south_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx]; } __syncthreads(); east_c[ty][tx] = C_cuda[index_e]; if ( bx == gridDim.x - 1 ){ east_c[ty][tx] = C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1]; } __syncthreads(); c_cuda_temp[ty][tx] = C_cuda[index]; __syncthreads(); cc = c_cuda_temp[ty][tx]; if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se cn = cc; cs = south_c[ty][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( tx == BLOCK_SIZE -1 ){ //e cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = east_c[ty][tx]; } else if ( ty == BLOCK_SIZE -1){ //s cn = cc; cs = south_c[ty][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } else{ //the data elements which are not on the borders cn = cc; cs = c_cuda_temp[ty+1][tx]; cw = cc; ce = c_cuda_temp[ty][tx+1]; } // divergence (equ 58) d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index]; // image update (equ 61) c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum; __syncthreads(); J_cuda[index] = c_cuda_result[ty][tx]; }
241741d8984e24575403c2dd63f340ba9be7783f.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "mpi_utils.h" #include "cuda_utils.h" #include "CudaDomdecRecipComm.h" CudaDomdecRecipComm::CudaDomdecRecipComm(MPI_Comm comm_recip, MPI_Comm comm_direct_recip, int mynode, std::vector<int>& direct_nodes, std::vector<int>& recip_nodes, const bool cudaAware) : DomdecRecipComm(comm_recip, comm_direct_recip, mynode, direct_nodes, recip_nodes), cudaMPI(cudaAware, comm_direct_recip) { h_commbuf_len = 0; h_commbuf = NULL; coord_copy_ptr = NULL; coord_ptr = NULL; } CudaDomdecRecipComm::~CudaDomdecRecipComm() { if (h_commbuf != NULL) deallocate_host<char>(&h_commbuf); } // // Send coordinates to Recip from coord[] // void CudaDomdecRecipComm::send_coord(float4* coord, hipStream_t stream) { const int TAG = 1; if (!isDirect) { std::cout << "CudaDomdecRecipComm::send_coord, only direct nodes are allowed here" << std::endl; exit(1); } coord_ptr = NULL; coord_copy_ptr = NULL; /* if (isDirect && isRecip && direct_nodes.size() == 1) { //----------------------------------------------------------- // Only a single Direct node => set pointer and we are done! 
//----------------------------------------------------------- coord_ptr = coord; return; } */ if (isRecip) { //------------------------------------------------ // Mixed Direct+Recip node => Copy coordinates //------------------------------------------------ coord_copy_ptr = coord; } else { //------------------------------------------------ // Pure Direct node => Send coordinates via MPI //------------------------------------------------ // Re-allocate h_commbuf if needed if (!cudaMPI.isCudaAware()) { reallocate_host<char>(&h_commbuf, &h_commbuf_len, ncomm.at(0)*sizeof(float4), 1.2f); } // Sync thread here before communication cudaCheck(hipStreamSynchronize(stream)); // Send MPICheck(cudaMPI.Send((void *)coord, ncomm.at(0)*sizeof(float4), recip_nodes.at(0), TAG, h_commbuf)); } } // // Recv coordinates from Direct to coord[] // void CudaDomdecRecipComm::recv_coord(float4* coord, hipStream_t stream) { const int TAG = 1; if (!isRecip) { std::cout << "CudaDomdecRecipComm::recv_coord, only recip nodes are allowed here" << std::endl; exit(1); } /* if (isDirect && isRecip && direct_nodes.size() == 1) { //----------------------------------------------------------- // Only a single Direct node => set pointer and we are done! // NOTE: coord_ptr was already set in the call to send_coord() //----------------------------------------------------------- if (coord_ptr == NULL) { std::cout << "CudaDomdecRecipComm::recv_coord, coord_ptr should have been set by send_coord" << std::endl; exit(1); } return; } */ coord_ptr = NULL; if (isRecip) { //------------------------------------------------ // Recip node => Receive coordinates from Direct //------------------------------------------------ // Re-allocate h_commbuf if needed if (!cudaMPI.isCudaAware()) { // Count the required h_commbuf size int ncoord_buf = 0; for (int i=0;i < direct_nodes.size();i++) { if (mynode != direct_nodes.at(i)) ncoord_buf += ncomm.at(i); } float fac = (recip_nodes.size() == 1) ? 
1.0f : 1.2f; reallocate_host<char>(&h_commbuf, &h_commbuf_len, ncoord_buf*sizeof(float4), fac); } float4* h_coordbuf = (float4 *)h_commbuf; int h_coordbuf_pos = 0; for (int i=0;i < direct_nodes.size();i++) { if (mynode != direct_nodes.at(i)) { // Receive via MPI // NOTE: It's ok to NOT to synchronize here since were receiving coordinates, not sending MPICheck(cudaMPI.Recv(&coord[pcomm.at(i)], ncomm.at(i)*sizeof(float4), direct_nodes.at(i), TAG, MPI_STATUS_IGNORE, &h_coordbuf[h_coordbuf_pos])); h_coordbuf_pos += ncomm.at(i); } else { // Copy device buffer // NOTE: We don't have to synchronize stream "stream" since it is the one doing all // the work here assert(coord_copy_ptr != NULL); copy_DtoD<float4>(coord_copy_ptr, &coord[pcomm.at(i)], ncomm.at(i), stream); } } // Store pointer to where coordinates are found coord_ptr = coord; } } // // Send forces to Direct // void CudaDomdecRecipComm::send_force(float3* force, hipStream_t stream) { const int TAG = 1; if (!isRecip) { std::cout << "CudaDomdecRecipComm::send_force, only recip nodes are allowed here" << std::endl; exit(1); } // Re-allocate h_commbuf if needed if (!cudaMPI.isCudaAware()) { float fac = (recip_nodes.size() == 1) ? 
1.0f : 1.2f; reallocate_host<char>(&h_commbuf, &h_commbuf_len, pcomm.at(direct_nodes.size())*sizeof(float3), fac); } //--------------------------------------------------- // Recip node => Send forces to Direct nodes //--------------------------------------------------- float3* h_coordbuf = (float3 *)h_commbuf; bool syncDone = false; for (int i=0;i < direct_nodes.size();i++) { if (mynode != direct_nodes.at(i)) { // Send via MPI // Sync thread here before communication if (!syncDone) cudaCheck(hipStreamSynchronize(stream)); syncDone = true; MPICheck(cudaMPI.Send(&force[pcomm.at(i)], ncomm.at(i)*sizeof(float3), direct_nodes.at(i), TAG, &h_coordbuf[pcomm.at(i)])); } } } // // Receive forces from Direct // void CudaDomdecRecipComm::recv_force(float3* force, hipStream_t stream) { const int TAG = 1; if (!isDirect) { std::cout << "CudaDomdecRecipComm::recv_force, only direct nodes are allowed here" << std::endl; exit(1); } force_ptr = NULL; //--------------------------------------------------- // Direct node => Receive forces from Recip node //--------------------------------------------------- if (mynode != recip_nodes.at(0)) { if (isRecip) { std::cout << "CudaDomdecRecipComm::recv_force, must be pure direct node to be here" << std::endl; exit(1); } // Re-allocate h_commbuf if needed if (!cudaMPI.isCudaAware()) { reallocate_host<char>(&h_commbuf, &h_commbuf_len, ncomm.at(0)*sizeof(float3), 1.2f); } // Reveice via MPI MPICheck(cudaMPI.Recv(force, ncomm.at(0)*sizeof(float3), recip_nodes.at(0), TAG, MPI_STATUS_IGNORE, h_commbuf)); force_ptr = force; } else { if (!isRecip) { std::cout << "CudaDomdecRecipComm::recv_force, must be direct+recip node to be here" << std::endl; exit(1); } // This is a Direct+Recip node. 
No need to receive forces via MPI since they are // already in the force buffer, just need to get the address: // Get a pointer where the forces are stored on a Direct+Recip node force_ptr = &force[pcomm.at(imynode)]; } // Sync thread to wait for all communication to finish cudaCheck(hipStreamSynchronize(stream)); }
241741d8984e24575403c2dd63f340ba9be7783f.cu
#include <iostream> #include "mpi_utils.h" #include "cuda_utils.h" #include "CudaDomdecRecipComm.h" CudaDomdecRecipComm::CudaDomdecRecipComm(MPI_Comm comm_recip, MPI_Comm comm_direct_recip, int mynode, std::vector<int>& direct_nodes, std::vector<int>& recip_nodes, const bool cudaAware) : DomdecRecipComm(comm_recip, comm_direct_recip, mynode, direct_nodes, recip_nodes), cudaMPI(cudaAware, comm_direct_recip) { h_commbuf_len = 0; h_commbuf = NULL; coord_copy_ptr = NULL; coord_ptr = NULL; } CudaDomdecRecipComm::~CudaDomdecRecipComm() { if (h_commbuf != NULL) deallocate_host<char>(&h_commbuf); } // // Send coordinates to Recip from coord[] // void CudaDomdecRecipComm::send_coord(float4* coord, cudaStream_t stream) { const int TAG = 1; if (!isDirect) { std::cout << "CudaDomdecRecipComm::send_coord, only direct nodes are allowed here" << std::endl; exit(1); } coord_ptr = NULL; coord_copy_ptr = NULL; /* if (isDirect && isRecip && direct_nodes.size() == 1) { //----------------------------------------------------------- // Only a single Direct node => set pointer and we are done! 
//----------------------------------------------------------- coord_ptr = coord; return; } */ if (isRecip) { //------------------------------------------------ // Mixed Direct+Recip node => Copy coordinates //------------------------------------------------ coord_copy_ptr = coord; } else { //------------------------------------------------ // Pure Direct node => Send coordinates via MPI //------------------------------------------------ // Re-allocate h_commbuf if needed if (!cudaMPI.isCudaAware()) { reallocate_host<char>(&h_commbuf, &h_commbuf_len, ncomm.at(0)*sizeof(float4), 1.2f); } // Sync thread here before communication cudaCheck(cudaStreamSynchronize(stream)); // Send MPICheck(cudaMPI.Send((void *)coord, ncomm.at(0)*sizeof(float4), recip_nodes.at(0), TAG, h_commbuf)); } } // // Recv coordinates from Direct to coord[] // void CudaDomdecRecipComm::recv_coord(float4* coord, cudaStream_t stream) { const int TAG = 1; if (!isRecip) { std::cout << "CudaDomdecRecipComm::recv_coord, only recip nodes are allowed here" << std::endl; exit(1); } /* if (isDirect && isRecip && direct_nodes.size() == 1) { //----------------------------------------------------------- // Only a single Direct node => set pointer and we are done! // NOTE: coord_ptr was already set in the call to send_coord() //----------------------------------------------------------- if (coord_ptr == NULL) { std::cout << "CudaDomdecRecipComm::recv_coord, coord_ptr should have been set by send_coord" << std::endl; exit(1); } return; } */ coord_ptr = NULL; if (isRecip) { //------------------------------------------------ // Recip node => Receive coordinates from Direct //------------------------------------------------ // Re-allocate h_commbuf if needed if (!cudaMPI.isCudaAware()) { // Count the required h_commbuf size int ncoord_buf = 0; for (int i=0;i < direct_nodes.size();i++) { if (mynode != direct_nodes.at(i)) ncoord_buf += ncomm.at(i); } float fac = (recip_nodes.size() == 1) ? 
1.0f : 1.2f; reallocate_host<char>(&h_commbuf, &h_commbuf_len, ncoord_buf*sizeof(float4), fac); } float4* h_coordbuf = (float4 *)h_commbuf; int h_coordbuf_pos = 0; for (int i=0;i < direct_nodes.size();i++) { if (mynode != direct_nodes.at(i)) { // Receive via MPI // NOTE: It's ok to NOT to synchronize here since were receiving coordinates, not sending MPICheck(cudaMPI.Recv(&coord[pcomm.at(i)], ncomm.at(i)*sizeof(float4), direct_nodes.at(i), TAG, MPI_STATUS_IGNORE, &h_coordbuf[h_coordbuf_pos])); h_coordbuf_pos += ncomm.at(i); } else { // Copy device buffer // NOTE: We don't have to synchronize stream "stream" since it is the one doing all // the work here assert(coord_copy_ptr != NULL); copy_DtoD<float4>(coord_copy_ptr, &coord[pcomm.at(i)], ncomm.at(i), stream); } } // Store pointer to where coordinates are found coord_ptr = coord; } } // // Send forces to Direct // void CudaDomdecRecipComm::send_force(float3* force, cudaStream_t stream) { const int TAG = 1; if (!isRecip) { std::cout << "CudaDomdecRecipComm::send_force, only recip nodes are allowed here" << std::endl; exit(1); } // Re-allocate h_commbuf if needed if (!cudaMPI.isCudaAware()) { float fac = (recip_nodes.size() == 1) ? 
1.0f : 1.2f; reallocate_host<char>(&h_commbuf, &h_commbuf_len, pcomm.at(direct_nodes.size())*sizeof(float3), fac); } //--------------------------------------------------- // Recip node => Send forces to Direct nodes //--------------------------------------------------- float3* h_coordbuf = (float3 *)h_commbuf; bool syncDone = false; for (int i=0;i < direct_nodes.size();i++) { if (mynode != direct_nodes.at(i)) { // Send via MPI // Sync thread here before communication if (!syncDone) cudaCheck(cudaStreamSynchronize(stream)); syncDone = true; MPICheck(cudaMPI.Send(&force[pcomm.at(i)], ncomm.at(i)*sizeof(float3), direct_nodes.at(i), TAG, &h_coordbuf[pcomm.at(i)])); } } } // // Receive forces from Direct // void CudaDomdecRecipComm::recv_force(float3* force, cudaStream_t stream) { const int TAG = 1; if (!isDirect) { std::cout << "CudaDomdecRecipComm::recv_force, only direct nodes are allowed here" << std::endl; exit(1); } force_ptr = NULL; //--------------------------------------------------- // Direct node => Receive forces from Recip node //--------------------------------------------------- if (mynode != recip_nodes.at(0)) { if (isRecip) { std::cout << "CudaDomdecRecipComm::recv_force, must be pure direct node to be here" << std::endl; exit(1); } // Re-allocate h_commbuf if needed if (!cudaMPI.isCudaAware()) { reallocate_host<char>(&h_commbuf, &h_commbuf_len, ncomm.at(0)*sizeof(float3), 1.2f); } // Reveice via MPI MPICheck(cudaMPI.Recv(force, ncomm.at(0)*sizeof(float3), recip_nodes.at(0), TAG, MPI_STATUS_IGNORE, h_commbuf)); force_ptr = force; } else { if (!isRecip) { std::cout << "CudaDomdecRecipComm::recv_force, must be direct+recip node to be here" << std::endl; exit(1); } // This is a Direct+Recip node. 
No need to receive forces via MPI since they are // already in the force buffer, just need to get the address: // Get a pointer where the forces are stored on a Direct+Recip node force_ptr = &force[pcomm.at(imynode)]; } // Sync thread to wait for all communication to finish cudaCheck(cudaStreamSynchronize(stream)); }
4215851f849dac8d7adb32c7d02b46d79f3a85cc.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda_sparse_matrix.h" #include "cuda_utils.cuh" #define CudaAssert(call) CUDA_CHECK_SUCCESS(call) // #ifdef DEAL_II_WITH_CUDA #define CUSPARSE_CHECK_SUCCESS(call,msg) \ do { \ hipsparseStatus_t status = call; \ if (status != HIPSPARSE_STATUS_SUCCESS) { \ fprintf(stderr,"%s\n",msg); \ exit(1); \ } \ } while(0) // DEAL_II_NAMESPACE_OPEN namespace CUDAWrappers { // generic functions template <typename Number> void hyb2csr(const hipsparseHandle_t handle, const hipsparseMatDescr_t descr, const cusparseHybMat_t hyb, Number *mat_val, int *mat_ptr, int *mat_ind); template <typename Number> void csr2hyb(const hipsparseHandle_t handle, unsigned int n_rows, unsigned int n_cols, const hipsparseMatDescr_t descr, const Number *mat_val, const int *mat_ptr, const int *mat_ind, cusparseHybMat_t hyb); template <typename Number> void csrmv(const hipsparseHandle_t handle, int m, int n, int nnz, const Number *alpha, const hipsparseMatDescr_t descrA, const Number *csrValA, const int *csrRowPtrA, const int *csrColIndA, const Number *x, const Number *beta, Number *y); template <typename Number> void hybmv(const hipsparseHandle_t handle, const Number *alpha, const hipsparseMatDescr_t descrA, const cusparseHybMat_t hybA, const Number *x, const Number *beta, Number *y); // implementations template <> void hybmv(const hipsparseHandle_t handle, const double *alpha, const hipsparseMatDescr_t descrA, const cusparseHybMat_t hybA, const double *x, const double *beta, double *y) { CUSPARSE_CHECK_SUCCESS(cusparseDhybmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, alpha, descrA, hybA, x, beta, y), "CUSPARSE: hybmv failed"); } template <> void hybmv(const hipsparseHandle_t handle, const float *alpha, const hipsparseMatDescr_t descrA, const cusparseHybMat_t hybA, const float *x, const float *beta, float *y) { CUSPARSE_CHECK_SUCCESS(cusparseShybmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, alpha, descrA, hybA, x, beta, y), "CUSPARSE: hybmv 
failed"); } template <> void csrmv(const hipsparseHandle_t handle, int m, int n, int nnz, const double *alpha, const hipsparseMatDescr_t descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *x, const double *beta, double *y) { CUSPARSE_CHECK_SUCCESS(hipsparseDcsrmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y), "CUSPARSE: csrmv failed"); } template <> void csrmv(const hipsparseHandle_t handle, int m, int n, int nnz, const float *alpha, const hipsparseMatDescr_t descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *x, const float *beta, float *y) { CUSPARSE_CHECK_SUCCESS(hipsparseScsrmv(handle,HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y), "CUSPARSE: csrmv failed"); } template <> void hyb2csr(const hipsparseHandle_t handle, const hipsparseMatDescr_t descr, const cusparseHybMat_t hyb, double *mat_val, int *mat_ptr, int *mat_ind) { CUSPARSE_CHECK_SUCCESS(cusparseDhyb2csr(handle,descr,hyb,mat_val,mat_ptr, mat_ind), "CUSPARSE: Failed converting matrix format from hyb to csr"); } template <> void hyb2csr(const hipsparseHandle_t handle, const hipsparseMatDescr_t descr, const cusparseHybMat_t hyb, float *mat_val, int *mat_ptr, int *mat_ind) { CUSPARSE_CHECK_SUCCESS(cusparseShyb2csr(handle,descr,hyb,mat_val,mat_ptr, mat_ind), "CUSPARSE: Failed converting matrix format from hyb to csr"); } template <> void csr2hyb(const hipsparseHandle_t handle, unsigned int n_rows, unsigned int n_cols, const hipsparseMatDescr_t descr, const double *mat_val, const int *mat_ptr, const int *mat_ind, cusparseHybMat_t hyb) { CUSPARSE_CHECK_SUCCESS(cusparseDcsr2hyb(handle,n_rows,n_cols,descr,mat_val,mat_ptr, mat_ind,hyb,0,CUSPARSE_HYB_PARTITION_AUTO), "CUSPARSE: Failed converting matrix to hyb format"); } template <> void csr2hyb(const hipsparseHandle_t handle, unsigned int n_rows, unsigned int 
n_cols, const hipsparseMatDescr_t descr, const float *mat_val, const int *mat_ptr, const int *mat_ind, cusparseHybMat_t hyb) { CUSPARSE_CHECK_SUCCESS(cusparseScsr2hyb(handle,n_rows,n_cols,descr,mat_val,mat_ptr, mat_ind,hyb,0,CUSPARSE_HYB_PARTITION_AUTO), "CUSPARSE: Failed converting matrix to hyb format"); } template<typename Number> SparseMatrix<Number>::SparseMatrix() : initialized(false),n_cols(0), n_rows(0), nnz(0) { mat_val = NULL; mat_ind = NULL; mat_ptr = NULL; } template<typename Number> SparseMatrix<Number>::SparseMatrix(const ::dealii::SparseMatrix<Number> &src_mat) : initialized(false), n_cols(0), n_rows(0), nnz(0) { mat_val = NULL; mat_ind = NULL; mat_ptr = NULL; reinit(src_mat); } template<typename Number> SparseMatrix<Number>::~SparseMatrix() { if(initialized) { #ifdef USE_HYB_MATRIX CUSPARSE_CHECK_SUCCESS(cusparseDestroyHybMat(hyb),"CUSPARSE: Hyb structure destruction failed"); #else CudaAssert(hipFree(mat_val)); CudaAssert(hipFree(mat_ind)); CudaAssert(hipFree(mat_ptr)); #endif CUSPARSE_CHECK_SUCCESS(hipsparseDestroyMatDescr(descr),"CUSPARSE: Matrix descriptor destruction failed"); CUSPARSE_CHECK_SUCCESS(hipsparseDestroy(handle),"CUSPARSE: Library release of resources failed"); } } template<typename Number> void SparseMatrix<Number>::reinit(const ::dealii::SparseMatrix<Number> &src_mat) { if(!initialized) { CUSPARSE_CHECK_SUCCESS(hipsparseCreate(&handle), "CUSPARSE: Failed initializing library"); CUSPARSE_CHECK_SUCCESS(hipsparseCreateMatDescr(&descr), "CUSPARSE: Failed initializing matrix descriptor"); CUSPARSE_CHECK_SUCCESS(hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL), "CUSPARSE: Failed setting matrix type"); CUSPARSE_CHECK_SUCCESS(hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO), "CUSPARSE: Failed setting matrix index base"); #ifdef USE_HYB_MATRIX CUSPARSE_CHECK_SUCCESS(cusparseCreateHybMat(&hyb),"CUSPARSE: Failed initializing hyb structure"); #endif } else { #ifndef USE_HYB_MATRIX CudaAssert(hipFree(mat_val)); 
CudaAssert(hipFree(mat_ind)); CudaAssert(hipFree(mat_ptr)); #endif } initialized = true; nnz = src_mat.n_nonzero_elements(); n_rows = src_mat.m(); n_cols = src_mat.n(); // initialize CSR matrix from CPU structure std::vector<int> Ap(n_rows+1,0); std::vector<int> Ai(nnz); std::vector<Number> Av(nnz); for (int row=0; row<n_rows; ++row) { int cursor = Ap[row]; for (typename ::dealii::SparseMatrix<Number>::const_iterator p=src_mat.begin(row); p != src_mat.end(row); ++p) { Ai[cursor] = p->column(); Av[cursor] = p->value(); ++cursor; } Ap[row+1] = cursor; // This row is now initialized, but the diagonal element is first in the // Deal.II world, so we need to resort for CUSPARSE. For simplicity we // just make a series of swaps (this is kind of a single run of // bubble-sort, which gives us the desired result since the array is // already "almost" sorted) for(int i = Ap[row]; (i < Ap[row+1]-1) && (Ai[i] > Ai[i+1]); ++i) { std::swap (Ai[i], Ai[i+1]); std::swap (Av[i], Av[i+1]); } } // allocate device memory CudaAssert(hipMalloc(&mat_ptr,(n_rows+1)*sizeof(int))); CudaAssert(hipMalloc(&mat_ind,nnz*sizeof(int))); CudaAssert(hipMalloc(&mat_val,nnz*sizeof(Number))); // copy from host to device CudaAssert(hipMemcpy(mat_ptr,Ap.data(),Ap.size()*sizeof(int), hipMemcpyHostToDevice)); CudaAssert(hipMemcpy(mat_ind,Ai.data(),Ai.size()*sizeof(int), hipMemcpyHostToDevice)); CudaAssert(hipMemcpy(mat_val,Av.data(),Av.size()*sizeof(Number), hipMemcpyHostToDevice)); #ifdef USE_HYB_MATRIX // convert to hyb format csr2hyb(handle,n_rows,n_cols,descr,mat_val,mat_ptr, mat_ind,hyb,0,CUSPARSE_HYB_PARTITION_AUTO); CudaAssert(hipFree(mat_val)); CudaAssert(hipFree(mat_ind)); CudaAssert(hipFree(mat_ptr)); #endif } template<typename Number> void SparseMatrix<Number>::init() { if(!initialized) { CUSPARSE_CHECK_SUCCESS(hipsparseCreate(&handle), "CUSPARSE: Failed initializing library"); CUSPARSE_CHECK_SUCCESS(hipsparseCreateMatDescr(&descr), "CUSPARSE: Failed initializing matrix descriptor"); 
CUSPARSE_CHECK_SUCCESS(hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL), "CUSPARSE: Failed setting matrix type"); CUSPARSE_CHECK_SUCCESS(hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO), "CUSPARSE: Failed setting matrix index base"); #ifdef USE_HYB_MATRIX CUSPARSE_CHECK_SUCCESS(cusparseCreateHybMat(&hyb),"CUSPARSE: Failed initializing hyb structure"); #endif } else { #ifndef USE_HYB_MATRIX CudaAssert(hipFree(mat_val)); CudaAssert(hipFree(mat_ind)); CudaAssert(hipFree(mat_ptr)); #endif } initialized = true; /* Initialize with this matrix: [ 1 0 1 0 1 1 0 1 0 1 0 0 0 0 1 1] */ n_rows = 4; n_cols = 4; std::vector<std::vector<Number>> vals(n_rows); std::vector<std::vector<int>> inds(n_rows); vals[0].push_back(1.0); // A[0,0] = 1.0 inds[0].push_back(0); vals[0].push_back(1.0); // A[0,2] = 1.0 inds[0].push_back(2); vals[1].push_back(1.0); // A[1,0] = 1.0 inds[1].push_back(0); vals[1].push_back(1.0); // A[1,1] = 1.0 inds[1].push_back(1); vals[1].push_back(1.0); // A[1,3] = 1.0 inds[1].push_back(3); vals[2].push_back(1.0); // A[2,1] = 1.0 inds[2].push_back(1); vals[3].push_back(1.0); // A[3,2] = 1.0 inds[3].push_back(2); vals[3].push_back(1.0); // A[3,3] = 1.0 inds[3].push_back(3); // initialize CSR matrix from CPU structure std::vector<int> Ap(n_rows+1,0); std::vector<int> Ai; std::vector<Number> Av; for (int row=0; row<n_rows; ++row) { Av.insert( Av.end(), vals[row].begin(), vals[row].end() ); Ai.insert( Ai.end(), inds[row].begin(), inds[row].end() ); Ap[row+1] = Ap[row]+vals[row].size(); } nnz = Ap[n_rows]; CudaAssert(hipMalloc(&mat_ptr,(n_rows+1)*sizeof(int))); CudaAssert(hipMalloc(&mat_ind,nnz*sizeof(int))); CudaAssert(hipMalloc(&mat_val,nnz*sizeof(Number))); // copy from host to device CudaAssert(hipMemcpy(mat_ptr,Ap.data(),Ap.size()*sizeof(int), hipMemcpyHostToDevice)); CudaAssert(hipMemcpy(mat_ind,Ai.data(),Ai.size()*sizeof(int), hipMemcpyHostToDevice)); CudaAssert(hipMemcpy(mat_val,Av.data(),Av.size()*sizeof(Number), hipMemcpyHostToDevice)); 
#ifdef USE_HYB_MATRIX // convert to hyb format csr2hyb(handle,n_rows,n_cols,descr,mat_val,mat_ptr, mat_ind,hyb,0,CUSPARSE_HYB_PARTITION_AUTO); CudaAssert(hipFree(mat_val)); CudaAssert(hipFree(mat_ind)); CudaAssert(hipFree(mat_ptr)); #endif } template<typename Number> unsigned int SparseMatrix<Number>::m() const { return n_rows; } template<typename Number> unsigned int SparseMatrix<Number>::n() const { return n_cols; } template<typename Number> void SparseMatrix<Number>::vmult(GpuVector<Number> &dst, const GpuVector<Number> &src) const { const Number zero = 0.0; const Number one = 1.0; Number *dst_buf = dst.getData(); const Number *vec_buf = src.getDataRO(); #ifdef USE_HYB_MATRIX hybmv(handle,&one, descr, hyb, vec_buf, &zero, dst_buf); #else csrmv(handle, n_rows, n_cols, nnz, &one, descr, mat_val, mat_ptr, mat_ind, vec_buf, &zero, dst_buf); #endif } template<typename Number> void SparseMatrix<Number>::print() { #ifdef USE_HYB_MATRIX // allocate device memory CudaAssert(hipMalloc(&mat_ptr,(n_rows+1)*sizeof(int))); CudaAssert(hipMalloc(&mat_ind,nnz*sizeof(int))); CudaAssert(hipMalloc(&mat_val,nnz*sizeof(Number))); // convert to hyb format hyb2csr(handle,descr,hyb,mat_val,mat_ptr, mat_ind); #endif // initialize CSR matrix from CPU structure std::vector<int> Ap(n_rows+1); std::vector<int> Ai(nnz); std::vector<Number> Av(nnz); // copy from host to device CudaAssert(hipMemcpy(Ap.data(),mat_ptr,Ap.size()*sizeof(int), hipMemcpyDeviceToHost)); CudaAssert(hipMemcpy(Ai.data(),mat_ind,Ai.size()*sizeof(int), hipMemcpyDeviceToHost)); CudaAssert(hipMemcpy(Av.data(),mat_val,Av.size()*sizeof(Number), hipMemcpyDeviceToHost)); for(int i=0; i<n_rows; i++) { for (int j=Ap[i]; j < Ap[i+1]; j++) { printf("[%d,%d]: %g\n",i,Ai[j],Av[j]); } } #ifdef USE_HYB_MATRIX CudaAssert(hipFree(mat_val)); CudaAssert(hipFree(mat_ind)); CudaAssert(hipFree(mat_ptr)); #endif } template <typename number> std::size_t SparseMatrix<number>::memory_consumption () const { return 
nnz*static_cast<std::size_t>(sizeof(number) + sizeof(int)) + (n_cols+1)*static_cast<std::size_t>(sizeof(int)) + sizeof(*this); } } template class CUDAWrappers::SparseMatrix<double>; template class CUDAWrappers::SparseMatrix<float>; // DEAL_II_NAMESPACE_CLOSE // #endif /* DEAL_II_WITH_CUDA */
4215851f849dac8d7adb32c7d02b46d79f3a85cc.cu
#include "cuda_sparse_matrix.h" #include "cuda_utils.cuh" #define CudaAssert(call) CUDA_CHECK_SUCCESS(call) // #ifdef DEAL_II_WITH_CUDA #define CUSPARSE_CHECK_SUCCESS(call,msg) \ do { \ cusparseStatus_t status = call; \ if (status != CUSPARSE_STATUS_SUCCESS) { \ fprintf(stderr,"%s\n",msg); \ exit(1); \ } \ } while(0) // DEAL_II_NAMESPACE_OPEN namespace CUDAWrappers { // generic functions template <typename Number> void hyb2csr(const cusparseHandle_t handle, const cusparseMatDescr_t descr, const cusparseHybMat_t hyb, Number *mat_val, int *mat_ptr, int *mat_ind); template <typename Number> void csr2hyb(const cusparseHandle_t handle, unsigned int n_rows, unsigned int n_cols, const cusparseMatDescr_t descr, const Number *mat_val, const int *mat_ptr, const int *mat_ind, cusparseHybMat_t hyb); template <typename Number> void csrmv(const cusparseHandle_t handle, int m, int n, int nnz, const Number *alpha, const cusparseMatDescr_t descrA, const Number *csrValA, const int *csrRowPtrA, const int *csrColIndA, const Number *x, const Number *beta, Number *y); template <typename Number> void hybmv(const cusparseHandle_t handle, const Number *alpha, const cusparseMatDescr_t descrA, const cusparseHybMat_t hybA, const Number *x, const Number *beta, Number *y); // implementations template <> void hybmv(const cusparseHandle_t handle, const double *alpha, const cusparseMatDescr_t descrA, const cusparseHybMat_t hybA, const double *x, const double *beta, double *y) { CUSPARSE_CHECK_SUCCESS(cusparseDhybmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, alpha, descrA, hybA, x, beta, y), "CUSPARSE: hybmv failed"); } template <> void hybmv(const cusparseHandle_t handle, const float *alpha, const cusparseMatDescr_t descrA, const cusparseHybMat_t hybA, const float *x, const float *beta, float *y) { CUSPARSE_CHECK_SUCCESS(cusparseShybmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, alpha, descrA, hybA, x, beta, y), "CUSPARSE: hybmv failed"); } template <> void csrmv(const cusparseHandle_t handle, int m, 
int n, int nnz, const double *alpha, const cusparseMatDescr_t descrA, const double *csrValA, const int *csrRowPtrA, const int *csrColIndA, const double *x, const double *beta, double *y) { CUSPARSE_CHECK_SUCCESS(cusparseDcsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y), "CUSPARSE: csrmv failed"); } template <> void csrmv(const cusparseHandle_t handle, int m, int n, int nnz, const float *alpha, const cusparseMatDescr_t descrA, const float *csrValA, const int *csrRowPtrA, const int *csrColIndA, const float *x, const float *beta, float *y) { CUSPARSE_CHECK_SUCCESS(cusparseScsrmv(handle,CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, alpha, descrA, csrValA, csrRowPtrA, csrColIndA, x, beta, y), "CUSPARSE: csrmv failed"); } template <> void hyb2csr(const cusparseHandle_t handle, const cusparseMatDescr_t descr, const cusparseHybMat_t hyb, double *mat_val, int *mat_ptr, int *mat_ind) { CUSPARSE_CHECK_SUCCESS(cusparseDhyb2csr(handle,descr,hyb,mat_val,mat_ptr, mat_ind), "CUSPARSE: Failed converting matrix format from hyb to csr"); } template <> void hyb2csr(const cusparseHandle_t handle, const cusparseMatDescr_t descr, const cusparseHybMat_t hyb, float *mat_val, int *mat_ptr, int *mat_ind) { CUSPARSE_CHECK_SUCCESS(cusparseShyb2csr(handle,descr,hyb,mat_val,mat_ptr, mat_ind), "CUSPARSE: Failed converting matrix format from hyb to csr"); } template <> void csr2hyb(const cusparseHandle_t handle, unsigned int n_rows, unsigned int n_cols, const cusparseMatDescr_t descr, const double *mat_val, const int *mat_ptr, const int *mat_ind, cusparseHybMat_t hyb) { CUSPARSE_CHECK_SUCCESS(cusparseDcsr2hyb(handle,n_rows,n_cols,descr,mat_val,mat_ptr, mat_ind,hyb,0,CUSPARSE_HYB_PARTITION_AUTO), "CUSPARSE: Failed converting matrix to hyb format"); } template <> void csr2hyb(const cusparseHandle_t handle, unsigned int n_rows, unsigned int n_cols, const cusparseMatDescr_t descr, const float *mat_val, const int *mat_ptr, const int 
*mat_ind, cusparseHybMat_t hyb) { CUSPARSE_CHECK_SUCCESS(cusparseScsr2hyb(handle,n_rows,n_cols,descr,mat_val,mat_ptr, mat_ind,hyb,0,CUSPARSE_HYB_PARTITION_AUTO), "CUSPARSE: Failed converting matrix to hyb format"); } template<typename Number> SparseMatrix<Number>::SparseMatrix() : initialized(false),n_cols(0), n_rows(0), nnz(0) { mat_val = NULL; mat_ind = NULL; mat_ptr = NULL; } template<typename Number> SparseMatrix<Number>::SparseMatrix(const ::dealii::SparseMatrix<Number> &src_mat) : initialized(false), n_cols(0), n_rows(0), nnz(0) { mat_val = NULL; mat_ind = NULL; mat_ptr = NULL; reinit(src_mat); } template<typename Number> SparseMatrix<Number>::~SparseMatrix() { if(initialized) { #ifdef USE_HYB_MATRIX CUSPARSE_CHECK_SUCCESS(cusparseDestroyHybMat(hyb),"CUSPARSE: Hyb structure destruction failed"); #else CudaAssert(cudaFree(mat_val)); CudaAssert(cudaFree(mat_ind)); CudaAssert(cudaFree(mat_ptr)); #endif CUSPARSE_CHECK_SUCCESS(cusparseDestroyMatDescr(descr),"CUSPARSE: Matrix descriptor destruction failed"); CUSPARSE_CHECK_SUCCESS(cusparseDestroy(handle),"CUSPARSE: Library release of resources failed"); } } template<typename Number> void SparseMatrix<Number>::reinit(const ::dealii::SparseMatrix<Number> &src_mat) { if(!initialized) { CUSPARSE_CHECK_SUCCESS(cusparseCreate(&handle), "CUSPARSE: Failed initializing library"); CUSPARSE_CHECK_SUCCESS(cusparseCreateMatDescr(&descr), "CUSPARSE: Failed initializing matrix descriptor"); CUSPARSE_CHECK_SUCCESS(cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL), "CUSPARSE: Failed setting matrix type"); CUSPARSE_CHECK_SUCCESS(cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO), "CUSPARSE: Failed setting matrix index base"); #ifdef USE_HYB_MATRIX CUSPARSE_CHECK_SUCCESS(cusparseCreateHybMat(&hyb),"CUSPARSE: Failed initializing hyb structure"); #endif } else { #ifndef USE_HYB_MATRIX CudaAssert(cudaFree(mat_val)); CudaAssert(cudaFree(mat_ind)); CudaAssert(cudaFree(mat_ptr)); #endif } initialized = true; nnz = 
src_mat.n_nonzero_elements(); n_rows = src_mat.m(); n_cols = src_mat.n(); // initialize CSR matrix from CPU structure std::vector<int> Ap(n_rows+1,0); std::vector<int> Ai(nnz); std::vector<Number> Av(nnz); for (int row=0; row<n_rows; ++row) { int cursor = Ap[row]; for (typename ::dealii::SparseMatrix<Number>::const_iterator p=src_mat.begin(row); p != src_mat.end(row); ++p) { Ai[cursor] = p->column(); Av[cursor] = p->value(); ++cursor; } Ap[row+1] = cursor; // This row is now initialized, but the diagonal element is first in the // Deal.II world, so we need to resort for CUSPARSE. For simplicity we // just make a series of swaps (this is kind of a single run of // bubble-sort, which gives us the desired result since the array is // already "almost" sorted) for(int i = Ap[row]; (i < Ap[row+1]-1) && (Ai[i] > Ai[i+1]); ++i) { std::swap (Ai[i], Ai[i+1]); std::swap (Av[i], Av[i+1]); } } // allocate device memory CudaAssert(cudaMalloc(&mat_ptr,(n_rows+1)*sizeof(int))); CudaAssert(cudaMalloc(&mat_ind,nnz*sizeof(int))); CudaAssert(cudaMalloc(&mat_val,nnz*sizeof(Number))); // copy from host to device CudaAssert(cudaMemcpy(mat_ptr,Ap.data(),Ap.size()*sizeof(int), cudaMemcpyHostToDevice)); CudaAssert(cudaMemcpy(mat_ind,Ai.data(),Ai.size()*sizeof(int), cudaMemcpyHostToDevice)); CudaAssert(cudaMemcpy(mat_val,Av.data(),Av.size()*sizeof(Number), cudaMemcpyHostToDevice)); #ifdef USE_HYB_MATRIX // convert to hyb format csr2hyb(handle,n_rows,n_cols,descr,mat_val,mat_ptr, mat_ind,hyb,0,CUSPARSE_HYB_PARTITION_AUTO); CudaAssert(cudaFree(mat_val)); CudaAssert(cudaFree(mat_ind)); CudaAssert(cudaFree(mat_ptr)); #endif } template<typename Number> void SparseMatrix<Number>::init() { if(!initialized) { CUSPARSE_CHECK_SUCCESS(cusparseCreate(&handle), "CUSPARSE: Failed initializing library"); CUSPARSE_CHECK_SUCCESS(cusparseCreateMatDescr(&descr), "CUSPARSE: Failed initializing matrix descriptor"); CUSPARSE_CHECK_SUCCESS(cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL), "CUSPARSE: Failed 
setting matrix type"); CUSPARSE_CHECK_SUCCESS(cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO), "CUSPARSE: Failed setting matrix index base"); #ifdef USE_HYB_MATRIX CUSPARSE_CHECK_SUCCESS(cusparseCreateHybMat(&hyb),"CUSPARSE: Failed initializing hyb structure"); #endif } else { #ifndef USE_HYB_MATRIX CudaAssert(cudaFree(mat_val)); CudaAssert(cudaFree(mat_ind)); CudaAssert(cudaFree(mat_ptr)); #endif } initialized = true; /* Initialize with this matrix: [ 1 0 1 0 1 1 0 1 0 1 0 0 0 0 1 1] */ n_rows = 4; n_cols = 4; std::vector<std::vector<Number>> vals(n_rows); std::vector<std::vector<int>> inds(n_rows); vals[0].push_back(1.0); // A[0,0] = 1.0 inds[0].push_back(0); vals[0].push_back(1.0); // A[0,2] = 1.0 inds[0].push_back(2); vals[1].push_back(1.0); // A[1,0] = 1.0 inds[1].push_back(0); vals[1].push_back(1.0); // A[1,1] = 1.0 inds[1].push_back(1); vals[1].push_back(1.0); // A[1,3] = 1.0 inds[1].push_back(3); vals[2].push_back(1.0); // A[2,1] = 1.0 inds[2].push_back(1); vals[3].push_back(1.0); // A[3,2] = 1.0 inds[3].push_back(2); vals[3].push_back(1.0); // A[3,3] = 1.0 inds[3].push_back(3); // initialize CSR matrix from CPU structure std::vector<int> Ap(n_rows+1,0); std::vector<int> Ai; std::vector<Number> Av; for (int row=0; row<n_rows; ++row) { Av.insert( Av.end(), vals[row].begin(), vals[row].end() ); Ai.insert( Ai.end(), inds[row].begin(), inds[row].end() ); Ap[row+1] = Ap[row]+vals[row].size(); } nnz = Ap[n_rows]; CudaAssert(cudaMalloc(&mat_ptr,(n_rows+1)*sizeof(int))); CudaAssert(cudaMalloc(&mat_ind,nnz*sizeof(int))); CudaAssert(cudaMalloc(&mat_val,nnz*sizeof(Number))); // copy from host to device CudaAssert(cudaMemcpy(mat_ptr,Ap.data(),Ap.size()*sizeof(int), cudaMemcpyHostToDevice)); CudaAssert(cudaMemcpy(mat_ind,Ai.data(),Ai.size()*sizeof(int), cudaMemcpyHostToDevice)); CudaAssert(cudaMemcpy(mat_val,Av.data(),Av.size()*sizeof(Number), cudaMemcpyHostToDevice)); #ifdef USE_HYB_MATRIX // convert to hyb format 
csr2hyb(handle,n_rows,n_cols,descr,mat_val,mat_ptr, mat_ind,hyb,0,CUSPARSE_HYB_PARTITION_AUTO); CudaAssert(cudaFree(mat_val)); CudaAssert(cudaFree(mat_ind)); CudaAssert(cudaFree(mat_ptr)); #endif } template<typename Number> unsigned int SparseMatrix<Number>::m() const { return n_rows; } template<typename Number> unsigned int SparseMatrix<Number>::n() const { return n_cols; } template<typename Number> void SparseMatrix<Number>::vmult(GpuVector<Number> &dst, const GpuVector<Number> &src) const { const Number zero = 0.0; const Number one = 1.0; Number *dst_buf = dst.getData(); const Number *vec_buf = src.getDataRO(); #ifdef USE_HYB_MATRIX hybmv(handle,&one, descr, hyb, vec_buf, &zero, dst_buf); #else csrmv(handle, n_rows, n_cols, nnz, &one, descr, mat_val, mat_ptr, mat_ind, vec_buf, &zero, dst_buf); #endif } template<typename Number> void SparseMatrix<Number>::print() { #ifdef USE_HYB_MATRIX // allocate device memory CudaAssert(cudaMalloc(&mat_ptr,(n_rows+1)*sizeof(int))); CudaAssert(cudaMalloc(&mat_ind,nnz*sizeof(int))); CudaAssert(cudaMalloc(&mat_val,nnz*sizeof(Number))); // convert to hyb format hyb2csr(handle,descr,hyb,mat_val,mat_ptr, mat_ind); #endif // initialize CSR matrix from CPU structure std::vector<int> Ap(n_rows+1); std::vector<int> Ai(nnz); std::vector<Number> Av(nnz); // copy from host to device CudaAssert(cudaMemcpy(Ap.data(),mat_ptr,Ap.size()*sizeof(int), cudaMemcpyDeviceToHost)); CudaAssert(cudaMemcpy(Ai.data(),mat_ind,Ai.size()*sizeof(int), cudaMemcpyDeviceToHost)); CudaAssert(cudaMemcpy(Av.data(),mat_val,Av.size()*sizeof(Number), cudaMemcpyDeviceToHost)); for(int i=0; i<n_rows; i++) { for (int j=Ap[i]; j < Ap[i+1]; j++) { printf("[%d,%d]: %g\n",i,Ai[j],Av[j]); } } #ifdef USE_HYB_MATRIX CudaAssert(cudaFree(mat_val)); CudaAssert(cudaFree(mat_ind)); CudaAssert(cudaFree(mat_ptr)); #endif } template <typename number> std::size_t SparseMatrix<number>::memory_consumption () const { return nnz*static_cast<std::size_t>(sizeof(number) + sizeof(int)) + 
(n_cols+1)*static_cast<std::size_t>(sizeof(int)) + sizeof(*this); } } template class CUDAWrappers::SparseMatrix<double>; template class CUDAWrappers::SparseMatrix<float>; // DEAL_II_NAMESPACE_CLOSE // #endif /* DEAL_II_WITH_CUDA */
8f7f65f94926d91e6b9107c305bf097cf36d1e30.hip
// !!! This is a file automatically generated by hipify!!! // C++ Script to simulate 2D incompressible flow #include <iostream> #include <algorithm> using namespace std; #include "input.h" #include "inputSerial.h" #include "inputParallel.h" #include <hip/hip_runtime.h> // CUDA kernel. Each thread takes care of one element __global__ void timeStep(double* delt,double* ux,double* uy){ // Get global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < ncGL) { double umax = 1.0; // for other cases you need to set this or write function //double vmax = 1.0; double C = 0.9; // Courant number double dt1=1; double dt2=1; double dt3=1; double delX = dx; double dely = dy; if((dx!=0)&(dy!=0)){ dt1 = C*dx/umax; dt2 = C*0.25*Re/((1.0/(delX*delX))+(1.0/(dely*dely))); dt3 = C*4/Re; } double dtmin = min(dt1,min(dt2,dt3)); // if (id == 0) { *delt = dtmin; //} } }
8f7f65f94926d91e6b9107c305bf097cf36d1e30.cu
// C++ Script to simulate 2D incompressible flow #include <iostream> #include <algorithm> using namespace std; #include "input.h" #include "inputSerial.h" #include "inputParallel.h" #include <cuda.h> // CUDA kernel. Each thread takes care of one element __global__ void timeStep(double* delt,double* ux,double* uy){ // Get global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; if (id < ncGL) { double umax = 1.0; // for other cases you need to set this or write function //double vmax = 1.0; double C = 0.9; // Courant number double dt1=1; double dt2=1; double dt3=1; double delX = dx; double dely = dy; if((dx!=0)&(dy!=0)){ dt1 = C*dx/umax; dt2 = C*0.25*Re/((1.0/(delX*delX))+(1.0/(dely*dely))); dt3 = C*4/Re; } double dtmin = min(dt1,min(dt2,dt3)); // if (id == 0) { *delt = dtmin; //} } }
61aa7cf784e7155189e139f66514c7a537dec1ae.hip
// !!! This is a file automatically generated by hipify!!! #include "stencil.cuh" #include <cstdio> #include <cstdlib> #include <iostream> void test1(float *image, float *mask) { image[0] = 4.0; image[1] = 1.0; image[2] = 2.0; image[3] = 3.0; mask[0] = 1.0; mask[1] = 2.0; mask[2] = 1.0; } void test2(float *image, float *mask) { image[0] = 1.0; image[1] = 0.0; image[2] = 0.0; image[3] = 0.0; image[4] = 1.0; image[5] = 0.0; image[6] = 0.0; image[7] = 0.0; image[8] = 1.0; image[9] = 0.0; mask[0] = 1.0; mask[1] = 2.0; mask[2] = 1.0; } void assign_input(float *image, float *mask, unsigned int n, unsigned int R) { for (unsigned int i = 0; i < n; ++i) { image[i] = 1.0; } for (unsigned int i = 0; i < 2 * R + 1; ++i) { mask[i] = 1.0; } } int main(int argc, char *argv[]) { unsigned int n = atoi(argv[1]); unsigned int R = atoi(argv[2]); unsigned int threads_per_block = atoi(argv[3]); float *image, *mask, *output; hipMallocManaged(&image, n * sizeof(float)); hipMallocManaged(&mask, (2 * R + 1) * sizeof(float)); hipMallocManaged(&output, n * sizeof(float)); // sample input if ((n == 4) && (R == 1)) { test1(image, mask); } else if ((n == 10) && (R == 1)) { test2(image, mask); } else { // general input assign_input(image, mask, n, R); } hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); stencil(image, mask, output, n, R, threads_per_block); hipEventRecord(stop); hipEventSynchronize(stop); // sample print /* for (unsigned int i = 0; i < n; ++i) { std::cout << output[i] << " "; } */ float ms; hipEventElapsedTime(&ms, start, stop); std::cout << output[n - 1] << '\n'; std::cout << ms << '\n'; hipFree(image); hipFree(mask); hipFree(output); return 0; }
61aa7cf784e7155189e139f66514c7a537dec1ae.cu
#include "stencil.cuh" #include <cstdio> #include <cstdlib> #include <iostream> void test1(float *image, float *mask) { image[0] = 4.0; image[1] = 1.0; image[2] = 2.0; image[3] = 3.0; mask[0] = 1.0; mask[1] = 2.0; mask[2] = 1.0; } void test2(float *image, float *mask) { image[0] = 1.0; image[1] = 0.0; image[2] = 0.0; image[3] = 0.0; image[4] = 1.0; image[5] = 0.0; image[6] = 0.0; image[7] = 0.0; image[8] = 1.0; image[9] = 0.0; mask[0] = 1.0; mask[1] = 2.0; mask[2] = 1.0; } void assign_input(float *image, float *mask, unsigned int n, unsigned int R) { for (unsigned int i = 0; i < n; ++i) { image[i] = 1.0; } for (unsigned int i = 0; i < 2 * R + 1; ++i) { mask[i] = 1.0; } } int main(int argc, char *argv[]) { unsigned int n = atoi(argv[1]); unsigned int R = atoi(argv[2]); unsigned int threads_per_block = atoi(argv[3]); float *image, *mask, *output; cudaMallocManaged(&image, n * sizeof(float)); cudaMallocManaged(&mask, (2 * R + 1) * sizeof(float)); cudaMallocManaged(&output, n * sizeof(float)); // sample input if ((n == 4) && (R == 1)) { test1(image, mask); } else if ((n == 10) && (R == 1)) { test2(image, mask); } else { // general input assign_input(image, mask, n, R); } cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); stencil(image, mask, output, n, R, threads_per_block); cudaEventRecord(stop); cudaEventSynchronize(stop); // sample print /* for (unsigned int i = 0; i < n; ++i) { std::cout << output[i] << " "; } */ float ms; cudaEventElapsedTime(&ms, start, stop); std::cout << output[n - 1] << '\n'; std::cout << ms << '\n'; cudaFree(image); cudaFree(mask); cudaFree(output); return 0; }
f2c726ad4f695d60eff130c519b15308bd9d27c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // (C) Copyright 2013, University of Illinois. All Rights Reserved // Author: Lijiuan Luo (lluo3@uiuc.edu), Geng Daniel Liu (gengliu2@illinois.edu) #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <parboil.h> #include <deque> #include <iostream> #include "config.h" FILE *fp; typedef int2 Node; typedef int2 Edge; #include "kernel.hip" const int h_top = 1; const int zero = 0; int main(int argc, char** argv) { printf("[BENCH] BFS from Parboil\n"); #ifdef LOCKFREE printf("[BENCH] Lock-free Barrier\n"); #endif #ifdef ATOMIC printf("[BENCH] Atomic Barrier\n"); #endif #ifdef HW_SYNC printf("[BENCH] Hardware Barrier\n"); #endif int num_of_nodes = 0; int num_of_edges = 0; struct pb_Parameters *params; struct pb_TimerSet timers; pb_InitializeTimerSet(&timers); params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } pb_SwitchToTimer(&timers, pb_TimerID_IO); // printf("Read in Graph from a file\n"); fp = fopen(params->inpFiles[0],"r"); if(!fp) { printf("Error Reading graph file\n"); return 0; } int source; int res; res = fscanf(fp,"%d",&num_of_nodes); // printf("Allocate host memory\n"); Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*num_of_nodes); int *color = (int*) malloc(sizeof(int)*num_of_nodes); int start, edgeno; // printf("Initalize the memory\n"); for( unsigned int i = 0; i < num_of_nodes; i++) { res = fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].x = start; h_graph_nodes[i].y = edgeno; color[i]=WHITE; } // printf("Read the source node from the file\n"); res = fscanf(fp,"%d",&source); res = fscanf(fp,"%d",&num_of_edges); int id,cost; Edge* h_graph_edges = (Edge*) malloc(sizeof(Edge)*num_of_edges); for(int i=0; i < num_of_edges ; i++) { res = fscanf(fp,"%d",&id); res = fscanf(fp,"%d",&cost); h_graph_edges[i].x = id; 
h_graph_edges[i].y = cost; } if(res!=1) printf("Reading input failed\n"); if(fp) fclose(fp); // printf("Allocate mem for the result on host side\n"); int* h_cost = (int*) malloc( sizeof(int)*num_of_nodes); for(int i = 0; i < num_of_nodes; i++){ h_cost[i] = INF; } h_cost[source] = 0; pb_SwitchToTimer(&timers, pb_TimerID_COPY); // printf("Copy the Node List to device memory\n"); Node* d_graph_nodes; hipMalloc((void**) &d_graph_nodes, sizeof(Node)*num_of_nodes); hipMemcpy(d_graph_nodes, h_graph_nodes, sizeof(Node)*num_of_nodes, hipMemcpyHostToDevice); // printf("Copy the Edge List to device Memory\n"); Edge* d_graph_edges; hipMalloc((void**) &d_graph_edges, sizeof(Edge)*num_of_edges); hipMemcpy(d_graph_edges, h_graph_edges, sizeof(Edge)*num_of_edges, hipMemcpyHostToDevice); int* d_color; hipMalloc((void**) &d_color, sizeof(int)*num_of_nodes); int* d_cost; hipMalloc((void**) &d_cost, sizeof(int)*num_of_nodes); int * d_q1; int * d_q2; hipMalloc((void**) &d_q1, sizeof(int)*num_of_nodes); hipMalloc((void**) &d_q2, sizeof(int)*num_of_nodes); int * tail; hipMalloc((void**) &tail, sizeof(int)); int *front_cost_d; hipMalloc((void**) &front_cost_d, sizeof(int)); hipMemcpy(d_color, color, sizeof(int)*num_of_nodes, hipMemcpyHostToDevice); hipMemcpy(d_cost, h_cost, sizeof(int)*num_of_nodes, hipMemcpyHostToDevice); //bind the texture memory with global memory hipBindTexture(0,g_graph_node_ref,d_graph_nodes, sizeof(Node)*num_of_nodes); hipBindTexture(0,g_graph_edge_ref,d_graph_edges,sizeof(Edge)*num_of_edges); printf("[BENCH] Starting GPU kernel\n"); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); int num_of_blocks; int num_of_threads_per_block; hipMemcpy(tail,&h_top,sizeof(int),hipMemcpyHostToDevice); hipMemcpy(&d_cost[source],&zero,sizeof(int),hipMemcpyHostToDevice); hipMemcpy( &d_q1[0], &source, sizeof(int), hipMemcpyHostToDevice); int num_t;//number of threads int k=0;//BFS level index //whether or not to adjust "k", see comment on 
"BFS_kernel_multi_blk_inGPU" for more details int * switch_kd; hipMalloc((void**) &switch_kd, sizeof(int)); int * num_td;//number of threads hipMalloc((void**) &num_td, sizeof(int)); //whether to stay within a kernel, used in "BFS_kernel_multi_blk_inGPU" bool *stay; hipMalloc( (void**) &stay, sizeof(bool)); int switch_k; //max number of frontier nodes assigned to a block int * max_nodes_per_block_d; hipMalloc( (void**) &max_nodes_per_block_d, sizeof(int)); int *global_kt_d; hipMalloc( (void**) &global_kt_d, sizeof(int)); hipMemcpy(global_kt_d,&zero, sizeof(int),hipMemcpyHostToDevice); int h_overflow = 0; int *d_overflow; hipMalloc((void**) &d_overflow, sizeof(int)); hipMemcpy(d_overflow, &h_overflow, sizeof(int), hipMemcpyHostToDevice); int count1=0, count2=0, count3=0; int *in, *out; #ifdef LOCKFREE int flag_size = NUM_SM*sizeof(int); hipMalloc((void **)&in, flag_size); hipMalloc((void **)&out, flag_size); hipMemset(in, 0, flag_size); hipMemset(out, 0, flag_size); #endif hipDeviceSynchronize(); do { hipMemcpy( &num_t, tail, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(tail,&zero,sizeof(int),hipMemcpyHostToDevice); if(num_t == 0) {//frontier is empty hipFree(stay); hipFree(switch_kd); hipFree(num_td); break; } num_of_blocks = 1; num_of_threads_per_block = num_t; if(num_of_threads_per_block <NUM_BIN) num_of_threads_per_block = NUM_BIN; if(num_t>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_t/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } if(num_of_blocks == 1)//will call "BFS_in_GPU_kernel" num_of_threads_per_block = MAX_THREADS_PER_BLOCK; if(num_of_blocks >1 && num_of_blocks <= NUM_SM)// will call "BFS_kernel_multi_blk_inGPU" num_of_blocks = NUM_SM; //assume "num_of_blocks" can not be very large dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); if(k%2 == 0) { if(num_of_blocks == 1) { count1 ++; hipLaunchKernelGGL(( BFS_in_GPU_kernel), dim3(grid), dim3(threads) , 0, 0, d_q1,d_q2, 
d_graph_nodes, d_graph_edges, d_color, d_cost,num_t , tail,GRAY0,k,d_overflow); } else if(num_of_blocks <= NUM_SM) { count2 ++; (hipMemcpy(num_td,&num_t,sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( BFS_kernel_multi_blk_inGPU) , dim3(grid), dim3(threads) , 0, 0, d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost, num_td, tail,GRAY0,k, switch_kd, max_nodes_per_block_d, global_kt_d,d_overflow, in, out); (hipMemcpy(&switch_k,switch_kd, sizeof(int), hipMemcpyDeviceToHost)); if(!switch_k) { k--; } } else { count3 ++; hipLaunchKernelGGL(( BFS_kernel), dim3(grid), dim3(threads) , 0, 0, d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail,GRAY0,k,d_overflow); } } else { if(num_of_blocks == 1) { count1 ++; hipLaunchKernelGGL(( BFS_in_GPU_kernel), dim3(grid), dim3(threads) , 0, 0, d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail,GRAY1,k,d_overflow); } else if(num_of_blocks <= NUM_SM) { count2 ++; (hipMemcpy(num_td,&num_t,sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( BFS_kernel_multi_blk_inGPU) , dim3(grid), dim3(threads) , 0, 0, d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_td, tail,GRAY1,k, switch_kd, max_nodes_per_block_d, global_kt_d,d_overflow, in, out); (hipMemcpy(&switch_k,switch_kd, sizeof(int), hipMemcpyDeviceToHost)); if(!switch_k) { k--; } } else { count3 ++; hipLaunchKernelGGL(( BFS_kernel), dim3(grid), dim3(threads) , 0, 0, d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail, GRAY1,k,d_overflow); } } k++; hipMemcpy(&h_overflow, d_overflow, sizeof(int), hipMemcpyDeviceToHost); if(h_overflow) { printf("Error: local queue was overflown. 
Need to increase W_LOCAL_QUEUE\n"); return 0; } } while(1); hipDeviceSynchronize(); #ifdef LOCKFREE hipFree(in); hipFree(out); #endif pb_SwitchToTimer(&timers, pb_TimerID_COPY); printf("[BENCH] GPU kernel done\n"); printf("[BENCH] Kernel called %d times (%d, %d, %d)\n", k, count1, count2, count3); // Copy result from device to host hipMemcpy(h_cost, d_cost, sizeof(int)*num_of_nodes, hipMemcpyDeviceToHost); hipMemcpy(color, d_color, sizeof(int)*num_of_nodes, hipMemcpyDeviceToHost); hipUnbindTexture(g_graph_node_ref); hipUnbindTexture(g_graph_edge_ref); hipFree(d_graph_nodes); hipFree(d_graph_edges); hipFree(d_color); hipFree(d_cost); hipFree(tail); hipFree(front_cost_d); // Store the result into a file pb_SwitchToTimer(&timers, pb_TimerID_IO); FILE *fp = fopen(params->outFile,"w"); fprintf(fp, "%d\n", num_of_nodes); for(int i=0;i<num_of_nodes;i++) fprintf(fp,"%d %d\n",i,h_cost[i]); fclose(fp); // Cleanup memory free( h_graph_nodes); free( h_graph_edges); free( color); free( h_cost); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); return 0; }
f2c726ad4f695d60eff130c519b15308bd9d27c7.cu
// (C) Copyright 2013, University of Illinois. All Rights Reserved // Author: Lijiuan Luo (lluo3@uiuc.edu), Geng Daniel Liu (gengliu2@illinois.edu) #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <parboil.h> #include <deque> #include <iostream> #include "config.h" FILE *fp; typedef int2 Node; typedef int2 Edge; #include "kernel.cu" const int h_top = 1; const int zero = 0; int main(int argc, char** argv) { printf("[BENCH] BFS from Parboil\n"); #ifdef LOCKFREE printf("[BENCH] Lock-free Barrier\n"); #endif #ifdef ATOMIC printf("[BENCH] Atomic Barrier\n"); #endif #ifdef HW_SYNC printf("[BENCH] Hardware Barrier\n"); #endif int num_of_nodes = 0; int num_of_edges = 0; struct pb_Parameters *params; struct pb_TimerSet timers; pb_InitializeTimerSet(&timers); params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } pb_SwitchToTimer(&timers, pb_TimerID_IO); // printf("Read in Graph from a file\n"); fp = fopen(params->inpFiles[0],"r"); if(!fp) { printf("Error Reading graph file\n"); return 0; } int source; int res; res = fscanf(fp,"%d",&num_of_nodes); // printf("Allocate host memory\n"); Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*num_of_nodes); int *color = (int*) malloc(sizeof(int)*num_of_nodes); int start, edgeno; // printf("Initalize the memory\n"); for( unsigned int i = 0; i < num_of_nodes; i++) { res = fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].x = start; h_graph_nodes[i].y = edgeno; color[i]=WHITE; } // printf("Read the source node from the file\n"); res = fscanf(fp,"%d",&source); res = fscanf(fp,"%d",&num_of_edges); int id,cost; Edge* h_graph_edges = (Edge*) malloc(sizeof(Edge)*num_of_edges); for(int i=0; i < num_of_edges ; i++) { res = fscanf(fp,"%d",&id); res = fscanf(fp,"%d",&cost); h_graph_edges[i].x = id; h_graph_edges[i].y = cost; } if(res!=1) printf("Reading input failed\n"); if(fp) fclose(fp); 
// printf("Allocate mem for the result on host side\n"); int* h_cost = (int*) malloc( sizeof(int)*num_of_nodes); for(int i = 0; i < num_of_nodes; i++){ h_cost[i] = INF; } h_cost[source] = 0; pb_SwitchToTimer(&timers, pb_TimerID_COPY); // printf("Copy the Node List to device memory\n"); Node* d_graph_nodes; cudaMalloc((void**) &d_graph_nodes, sizeof(Node)*num_of_nodes); cudaMemcpy(d_graph_nodes, h_graph_nodes, sizeof(Node)*num_of_nodes, cudaMemcpyHostToDevice); // printf("Copy the Edge List to device Memory\n"); Edge* d_graph_edges; cudaMalloc((void**) &d_graph_edges, sizeof(Edge)*num_of_edges); cudaMemcpy(d_graph_edges, h_graph_edges, sizeof(Edge)*num_of_edges, cudaMemcpyHostToDevice); int* d_color; cudaMalloc((void**) &d_color, sizeof(int)*num_of_nodes); int* d_cost; cudaMalloc((void**) &d_cost, sizeof(int)*num_of_nodes); int * d_q1; int * d_q2; cudaMalloc((void**) &d_q1, sizeof(int)*num_of_nodes); cudaMalloc((void**) &d_q2, sizeof(int)*num_of_nodes); int * tail; cudaMalloc((void**) &tail, sizeof(int)); int *front_cost_d; cudaMalloc((void**) &front_cost_d, sizeof(int)); cudaMemcpy(d_color, color, sizeof(int)*num_of_nodes, cudaMemcpyHostToDevice); cudaMemcpy(d_cost, h_cost, sizeof(int)*num_of_nodes, cudaMemcpyHostToDevice); //bind the texture memory with global memory cudaBindTexture(0,g_graph_node_ref,d_graph_nodes, sizeof(Node)*num_of_nodes); cudaBindTexture(0,g_graph_edge_ref,d_graph_edges,sizeof(Edge)*num_of_edges); printf("[BENCH] Starting GPU kernel\n"); cudaThreadSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); int num_of_blocks; int num_of_threads_per_block; cudaMemcpy(tail,&h_top,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(&d_cost[source],&zero,sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy( &d_q1[0], &source, sizeof(int), cudaMemcpyHostToDevice); int num_t;//number of threads int k=0;//BFS level index //whether or not to adjust "k", see comment on "BFS_kernel_multi_blk_inGPU" for more details int * switch_kd; cudaMalloc((void**) 
&switch_kd, sizeof(int)); int * num_td;//number of threads cudaMalloc((void**) &num_td, sizeof(int)); //whether to stay within a kernel, used in "BFS_kernel_multi_blk_inGPU" bool *stay; cudaMalloc( (void**) &stay, sizeof(bool)); int switch_k; //max number of frontier nodes assigned to a block int * max_nodes_per_block_d; cudaMalloc( (void**) &max_nodes_per_block_d, sizeof(int)); int *global_kt_d; cudaMalloc( (void**) &global_kt_d, sizeof(int)); cudaMemcpy(global_kt_d,&zero, sizeof(int),cudaMemcpyHostToDevice); int h_overflow = 0; int *d_overflow; cudaMalloc((void**) &d_overflow, sizeof(int)); cudaMemcpy(d_overflow, &h_overflow, sizeof(int), cudaMemcpyHostToDevice); int count1=0, count2=0, count3=0; int *in, *out; #ifdef LOCKFREE int flag_size = NUM_SM*sizeof(int); cudaMalloc((void **)&in, flag_size); cudaMalloc((void **)&out, flag_size); cudaMemset(in, 0, flag_size); cudaMemset(out, 0, flag_size); #endif cudaThreadSynchronize(); do { cudaMemcpy( &num_t, tail, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(tail,&zero,sizeof(int),cudaMemcpyHostToDevice); if(num_t == 0) {//frontier is empty cudaFree(stay); cudaFree(switch_kd); cudaFree(num_td); break; } num_of_blocks = 1; num_of_threads_per_block = num_t; if(num_of_threads_per_block <NUM_BIN) num_of_threads_per_block = NUM_BIN; if(num_t>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(num_t/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } if(num_of_blocks == 1)//will call "BFS_in_GPU_kernel" num_of_threads_per_block = MAX_THREADS_PER_BLOCK; if(num_of_blocks >1 && num_of_blocks <= NUM_SM)// will call "BFS_kernel_multi_blk_inGPU" num_of_blocks = NUM_SM; //assume "num_of_blocks" can not be very large dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); if(k%2 == 0) { if(num_of_blocks == 1) { count1 ++; BFS_in_GPU_kernel<<< grid, threads >>>(d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost,num_t , tail,GRAY0,k,d_overflow); } else if(num_of_blocks <= 
NUM_SM) { count2 ++; (cudaMemcpy(num_td,&num_t,sizeof(int), cudaMemcpyHostToDevice)); BFS_kernel_multi_blk_inGPU <<< grid, threads >>>(d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost, num_td, tail,GRAY0,k, switch_kd, max_nodes_per_block_d, global_kt_d,d_overflow, in, out); (cudaMemcpy(&switch_k,switch_kd, sizeof(int), cudaMemcpyDeviceToHost)); if(!switch_k) { k--; } } else { count3 ++; BFS_kernel<<< grid, threads >>>(d_q1,d_q2, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail,GRAY0,k,d_overflow); } } else { if(num_of_blocks == 1) { count1 ++; BFS_in_GPU_kernel<<< grid, threads >>>(d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail,GRAY1,k,d_overflow); } else if(num_of_blocks <= NUM_SM) { count2 ++; (cudaMemcpy(num_td,&num_t,sizeof(int), cudaMemcpyHostToDevice)); BFS_kernel_multi_blk_inGPU <<< grid, threads >>>(d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_td, tail,GRAY1,k, switch_kd, max_nodes_per_block_d, global_kt_d,d_overflow, in, out); (cudaMemcpy(&switch_k,switch_kd, sizeof(int), cudaMemcpyDeviceToHost)); if(!switch_k) { k--; } } else { count3 ++; BFS_kernel<<< grid, threads >>>(d_q2,d_q1, d_graph_nodes, d_graph_edges, d_color, d_cost, num_t, tail, GRAY1,k,d_overflow); } } k++; cudaMemcpy(&h_overflow, d_overflow, sizeof(int), cudaMemcpyDeviceToHost); if(h_overflow) { printf("Error: local queue was overflown. 
Need to increase W_LOCAL_QUEUE\n"); return 0; } } while(1); cudaThreadSynchronize(); #ifdef LOCKFREE cudaFree(in); cudaFree(out); #endif pb_SwitchToTimer(&timers, pb_TimerID_COPY); printf("[BENCH] GPU kernel done\n"); printf("[BENCH] Kernel called %d times (%d, %d, %d)\n", k, count1, count2, count3); // Copy result from device to host cudaMemcpy(h_cost, d_cost, sizeof(int)*num_of_nodes, cudaMemcpyDeviceToHost); cudaMemcpy(color, d_color, sizeof(int)*num_of_nodes, cudaMemcpyDeviceToHost); cudaUnbindTexture(g_graph_node_ref); cudaUnbindTexture(g_graph_edge_ref); cudaFree(d_graph_nodes); cudaFree(d_graph_edges); cudaFree(d_color); cudaFree(d_cost); cudaFree(tail); cudaFree(front_cost_d); // Store the result into a file pb_SwitchToTimer(&timers, pb_TimerID_IO); FILE *fp = fopen(params->outFile,"w"); fprintf(fp, "%d\n", num_of_nodes); for(int i=0;i<num_of_nodes;i++) fprintf(fp,"%d %d\n",i,h_cost[i]); fclose(fp); // Cleanup memory free( h_graph_nodes); free( h_graph_edges); free( color); free( h_cost); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); return 0; }
c7779c6361ed91e48cb9beca3b97978a96e5e267.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2019 XGBoost contributors */ #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/iterator/counting_iterator.h> #include <xgboost/logging.h> #include <algorithm> #include <bitset> #include <string> #include <sstream> #include <set> #include "constraints.cuh" #include "param.h" #include "../common/span.h" #include "../common/device_helpers.cuh" namespace xgboost { BitField::value_type constexpr BitField::kValueSize; BitField::value_type constexpr BitField::kOne; size_t FeatureInteractionConstraint::Features() const { return d_sets_ptr_.size() - 1; } void FeatureInteractionConstraint::Configure( tree::TrainParam const& param, int32_t const n_features) { has_constraint_ = true; if (param.interaction_constraints.length() == 0) { has_constraint_ = false; return; } // --- Parse interaction constraints std::istringstream iss(param.interaction_constraints); dmlc::JSONReader reader(&iss); // Interaction constraints parsed from string parameter. After // parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}. std::vector<std::vector<int32_t>> h_feature_constraints; try { reader.Read(&h_feature_constraints); } catch (dmlc::Error const& e) { LOG(FATAL) << "Failed to parse feature interaction constraint:\n" << param.interaction_constraints << "\n" << "With error:\n" << e.what(); } n_sets_ = h_feature_constraints.size(); size_t const n_feat_storage = BitField::ComputeStorageSize(n_features); if (n_feat_storage == 0 && n_features != 0) { LOG(FATAL) << "Wrong storage size, n_features: " << n_features; } // --- Initialize allowed features attached to nodes. 
if (param.max_depth == 0 && param.max_leaves == 0) { LOG(FATAL) << "Max leaves and max depth cannot both be unconstrained for gpu_hist."; } int32_t n_nodes {0}; if (param.max_depth != 0) { n_nodes = ::pow(2, param.max_depth + 1); } else { n_nodes = param.max_leaves * 2 - 1; } CHECK_NE(n_nodes, 0); node_constraints_.resize(n_nodes); node_constraints_storage_.resize(n_nodes); for (auto& n : node_constraints_storage_) { n.resize(BitField::ComputeStorageSize(n_features)); } for (size_t i = 0; i < node_constraints_storage_.size(); ++i) { auto span = dh::ToSpan(node_constraints_storage_[i]); node_constraints_[i] = BitField(span); } s_node_constraints_ = common::Span<BitField>(node_constraints_.data(), node_constraints_.size()); // Represent constraints as CSR format, flatten is the value vector, // ptr is row_ptr vector in CSR. std::vector<int32_t> h_feature_constraints_flatten; for (auto const& constraints : h_feature_constraints) { for (int32_t c : constraints) { h_feature_constraints_flatten.emplace_back(c); } } std::vector<int32_t> h_feature_constraints_ptr; size_t n_features_in_constraints = 0; h_feature_constraints_ptr.emplace_back(n_features_in_constraints); for (auto const& v : h_feature_constraints) { n_features_in_constraints += v.size(); h_feature_constraints_ptr.emplace_back(n_features_in_constraints); } // Copy the CSR to device. d_fconstraints_.resize(h_feature_constraints_flatten.size()); thrust::copy(h_feature_constraints_flatten.cbegin(), h_feature_constraints_flatten.cend(), d_fconstraints_.begin()); s_fconstraints_ = dh::ToSpan(d_fconstraints_); d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size()); thrust::copy(h_feature_constraints_ptr.cbegin(), h_feature_constraints_ptr.cend(), d_fconstraints_ptr_.begin()); s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_); // --- Compute interaction sets attached to each feature. // Use a set to eliminate duplicated entries. 
std::vector<std::set<int32_t> > h_features_set(n_features); int32_t cid = 0; for (auto const& constraints : h_feature_constraints) { for (auto const& feat : constraints) { h_features_set.at(feat).insert(cid); } cid++; } // Compute device sets. std::vector<int32_t> h_sets; int32_t ptr = 0; std::vector<int32_t> h_sets_ptr {ptr}; for (auto const& feature : h_features_set) { for (auto constraint_id : feature) { h_sets.emplace_back(constraint_id); } // empty set is well defined here. ptr += feature.size(); h_sets_ptr.emplace_back(ptr); } d_sets_ = h_sets; d_sets_ptr_ = h_sets_ptr; s_sets_ = dh::ToSpan(d_sets_); s_sets_ptr_ = dh::ToSpan(d_sets_ptr_); d_feature_buffer_storage_.resize(BitField::ComputeStorageSize(n_features)); feature_buffer_ = dh::ToSpan(d_feature_buffer_storage_); // --- Initialize result buffers. output_buffer_bits_storage_.resize(BitField::ComputeStorageSize(n_features)); output_buffer_bits_ = BitField(dh::ToSpan(output_buffer_bits_storage_)); input_buffer_bits_storage_.resize(BitField::ComputeStorageSize(n_features)); input_buffer_bits_ = BitField(dh::ToSpan(input_buffer_bits_storage_)); result_buffer_.resize(n_features); s_result_buffer_ = dh::ToSpan(result_buffer_); } FeatureInteractionConstraint::FeatureInteractionConstraint( tree::TrainParam const& param, int32_t const n_features) : has_constraint_{true}, n_sets_{0} { this->Configure(param, n_features); } void FeatureInteractionConstraint::Reset() { for (auto& node : node_constraints_storage_) { thrust::fill(node.begin(), node.end(), 0); } } __global__ void ClearBuffersKernel( BitField result_buffer_output, BitField result_buffer_input) { auto tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < result_buffer_output.Size()) { result_buffer_output.Clear(tid); } if (tid < result_buffer_input.Size()) { result_buffer_input.Clear(tid); } } void FeatureInteractionConstraint::ClearBuffers() { CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size()); CHECK_LE(feature_buffer_.Size(), 
output_buffer_bits_.Size()); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>( common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads)); hipLaunchKernelGGL(( ClearBuffersKernel), dim3(n_grids), dim3(kBlockThreads), 0, 0, output_buffer_bits_, input_buffer_bits_); } common::Span<int32_t> FeatureInteractionConstraint::QueryNode(int32_t node_id) { if (!has_constraint_) { return {}; } CHECK_LT(node_id, s_node_constraints_.size()); ClearBuffers(); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); auto p_result_buffer = result_buffer_.data(); BitField node_constraints = s_node_constraints_[node_id]; thrust::device_ptr<int32_t> const out_end = thrust::copy_if( thrust::device, begin, end, p_result_buffer, [=]__device__(int32_t pos) { bool res = node_constraints.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); return {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; } __global__ void SetInputBufferKernel(common::Span<int32_t> feature_list_input, BitField result_buffer_input) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < feature_list_input.size()) { result_buffer_input.Set(feature_list_input[tid]); } } __global__ void QueryFeatureListKernel(BitField node_constraints, BitField result_buffer_input, BitField result_buffer_output) { result_buffer_output |= node_constraints; result_buffer_output &= result_buffer_input; } common::Span<int32_t> FeatureInteractionConstraint::Query( common::Span<int32_t> feature_list, int32_t nid) { if (!has_constraint_ || nid == 0) { return feature_list; } ClearBuffers(); BitField node_constraints = s_node_constraints_[nid]; CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size()); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>( common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads)); hipLaunchKernelGGL(( SetInputBufferKernel), 
dim3(n_grids), dim3(kBlockThreads), 0, 0, feature_list, input_buffer_bits_); hipLaunchKernelGGL(( QueryFeatureListKernel), dim3(n_grids), dim3(kBlockThreads), 0, 0, node_constraints, input_buffer_bits_, output_buffer_bits_); thrust::counting_iterator<int32_t> begin(0); thrust::counting_iterator<int32_t> end(result_buffer_.size()); BitField local_result_buffer = output_buffer_bits_; thrust::device_ptr<int32_t> const out_end = thrust::copy_if( thrust::device, begin, end, result_buffer_.data(), [=]__device__(int32_t pos) { bool res = local_result_buffer.Check(pos); return res; }); size_t const n_available = std::distance(result_buffer_.data(), out_end); common::Span<int32_t> result = {s_result_buffer_.data(), s_result_buffer_.data() + n_available}; return result; } // Find interaction sets for each feature, then store all features in // those sets in a buffer. __global__ void RestoreFeatureListFromSetsKernel( BitField feature_buffer, int32_t fid, common::Span<int32_t> feature_interactions, common::Span<int32_t> feature_interactions_ptr, // of size n interaction set + 1 common::Span<int32_t> interactions_list, common::Span<int32_t> interactions_list_ptr) { auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x; auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y; // painful mapping: fid -> sets related to it -> features related to sets. 
auto const beg = interactions_list_ptr[fid]; auto const end = interactions_list_ptr[fid+1]; auto const n_sets = end - beg; if (tid_x < n_sets) { auto const set_id_pos = beg + tid_x; auto const set_id = interactions_list[set_id_pos]; auto const set_beg = feature_interactions_ptr[set_id]; auto const set_end = feature_interactions_ptr[set_id + 1]; auto const feature_pos = set_beg + tid_y; if (feature_pos < set_end) { feature_buffer.Set(feature_interactions[feature_pos]); } } } __global__ void InteractionConstraintSplitKernel(BitField feature, int32_t feature_id, BitField node, BitField left, BitField right) { auto tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid > node.Size()) { return; } // enable constraints from feature node |= feature; // clear the buffer after use if (tid < feature.Size()) { feature.Clear(tid); } // enable constraints from parent left |= node; right |= node; if (tid == feature_id) { // enable the split feature, set all of them at last instead of // setting it for parent to avoid race. 
node.Set(feature_id); left.Set(feature_id); right.Set(feature_id); } } void FeatureInteractionConstraint::Split( int32_t node_id, int32_t feature_id, int32_t left_id, int32_t right_id) { if (!has_constraint_) { return; } CHECK_NE(node_id, left_id) << " Split node: " << node_id << " and its left child: " << left_id << " cannot be the same."; CHECK_NE(node_id, right_id) << " Split node: " << node_id << " and its left child: " << right_id << " cannot be the same."; CHECK_LT(right_id, s_node_constraints_.size()); CHECK_NE(s_node_constraints_.size(), 0); BitField node = s_node_constraints_[node_id]; BitField left = s_node_constraints_[left_id]; BitField right = s_node_constraints_[right_id]; dim3 const block3(16, 64, 1); dim3 const grid3(common::DivRoundUp(n_sets_, 16), common::DivRoundUp(s_fconstraints_.size(), 64)); hipLaunchKernelGGL(( RestoreFeatureListFromSetsKernel), dim3(grid3), dim3(block3), 0, 0, feature_buffer_, feature_id, s_fconstraints_, s_fconstraints_ptr_, s_sets_, s_sets_ptr_); int constexpr kBlockThreads = 256; const int n_grids = static_cast<int>(common::DivRoundUp(node.Size(), kBlockThreads)); hipLaunchKernelGGL(( InteractionConstraintSplitKernel), dim3(n_grids), dim3(kBlockThreads), 0, 0, feature_buffer_, feature_id, node, left, right); } } // namespace xgboost
c7779c6361ed91e48cb9beca3b97978a96e5e267.cu
/*! * Copyright 2019 XGBoost contributors */
// GPU-side handling of feature interaction constraints: parses the constraint
// string from TrainParam and maintains per-tree-node bit fields of allowed
// features, queried and updated during GPU hist tree growth.
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <xgboost/logging.h>
#include <algorithm>
#include <bitset>
#include <string>
#include <sstream>
#include <set>
#include "constraints.cuh"
#include "param.h"
#include "../common/span.h"
#include "../common/device_helpers.cuh"

namespace xgboost {

BitField::value_type constexpr BitField::kValueSize;
BitField::value_type constexpr BitField::kOne;

// Number of features covered by the feature -> interaction-set mapping.
// d_sets_ptr_ is a CSR-style row pointer (one entry per feature plus a
// trailing end marker), hence size() - 1.
size_t FeatureInteractionConstraint::Features() const {
  return d_sets_ptr_.size() - 1;
}

// Parse `param.interaction_constraints` (a JSON list of feature-index lists,
// e.g. {{0, 1, 2}, {2, 3, 4}}) and build every device-side structure used by
// QueryNode/Query/Split:
//   - one BitField of allowed features per potential tree node,
//   - a CSR copy of the constraint sets (d_fconstraints_ / d_fconstraints_ptr_),
//   - the per-feature list of interaction sets it belongs to (d_sets_ / d_sets_ptr_),
//   - scratch bit-field and result buffers.
// When the constraint string is empty, has_constraint_ is set to false and
// nothing is allocated.
void FeatureInteractionConstraint::Configure(
    tree::TrainParam const& param, int32_t const n_features) {
  has_constraint_ = true;
  if (param.interaction_constraints.length() == 0) {
    has_constraint_ = false;
    return;
  }
  // --- Parse interaction constraints
  std::istringstream iss(param.interaction_constraints);
  dmlc::JSONReader reader(&iss);
  // Interaction constraints parsed from string parameter.  After
  // parsing, this looks like {{0, 1, 2}, {2, 3 ,4}}.
  std::vector<std::vector<int32_t>> h_feature_constraints;
  try {
    reader.Read(&h_feature_constraints);
  } catch (dmlc::Error const& e) {
    LOG(FATAL) << "Failed to parse feature interaction constraint:\n"
               << param.interaction_constraints << "\n"
               << "With error:\n" << e.what();
  }
  n_sets_ = h_feature_constraints.size();

  size_t const n_feat_storage = BitField::ComputeStorageSize(n_features);
  if (n_feat_storage == 0 && n_features != 0) {
    LOG(FATAL) << "Wrong storage size, n_features: " << n_features;
  }

  // --- Initialize allowed features attached to nodes.
  // Node bit fields are pre-allocated for every node the tree may contain,
  // so at least one of max_depth / max_leaves must bound the tree size.
  if (param.max_depth == 0 && param.max_leaves == 0) {
    LOG(FATAL) << "Max leaves and max depth cannot both be unconstrained for gpu_hist.";
  }
  int32_t n_nodes {0};
  if (param.max_depth != 0) {
    // Upper bound on node count for a tree of depth max_depth.
    n_nodes = std::pow(2, param.max_depth + 1);
  } else {
    // A binary tree with L leaves has 2L - 1 nodes.
    n_nodes = param.max_leaves * 2 - 1;
  }
  CHECK_NE(n_nodes, 0);
  node_constraints_.resize(n_nodes);
  node_constraints_storage_.resize(n_nodes);
  for (auto& n : node_constraints_storage_) {
    n.resize(BitField::ComputeStorageSize(n_features));
  }
  // Wrap each node's device storage in a BitField view.
  for (size_t i = 0; i < node_constraints_storage_.size(); ++i) {
    auto span = dh::ToSpan(node_constraints_storage_[i]);
    node_constraints_[i] = BitField(span);
  }
  s_node_constraints_ = common::Span<BitField>(node_constraints_.data(),
                                               node_constraints_.size());

  // Represent constraints as CSR format, flatten is the value vector,
  // ptr is row_ptr vector in CSR.
  std::vector<int32_t> h_feature_constraints_flatten;
  for (auto const& constraints : h_feature_constraints) {
    for (int32_t c : constraints) {
      h_feature_constraints_flatten.emplace_back(c);
    }
  }
  std::vector<int32_t> h_feature_constraints_ptr;
  size_t n_features_in_constraints = 0;
  h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
  for (auto const& v : h_feature_constraints) {
    n_features_in_constraints += v.size();
    h_feature_constraints_ptr.emplace_back(n_features_in_constraints);
  }
  // Copy the CSR to device.
  d_fconstraints_.resize(h_feature_constraints_flatten.size());
  thrust::copy(h_feature_constraints_flatten.cbegin(),
               h_feature_constraints_flatten.cend(),
               d_fconstraints_.begin());
  s_fconstraints_ = dh::ToSpan(d_fconstraints_);
  d_fconstraints_ptr_.resize(h_feature_constraints_ptr.size());
  thrust::copy(h_feature_constraints_ptr.cbegin(),
               h_feature_constraints_ptr.cend(),
               d_fconstraints_ptr_.begin());
  s_fconstraints_ptr_ = dh::ToSpan(d_fconstraints_ptr_);

  // --- Compute interaction sets attached to each feature.
  // Use a set to eliminate duplicated entries.
  std::vector<std::set<int32_t> > h_features_set(n_features);
  int32_t cid = 0;
  for (auto const& constraints : h_feature_constraints) {
    for (auto const& feat : constraints) {
      h_features_set.at(feat).insert(cid);
    }
    cid++;
  }
  // Compute device sets (again stored as CSR: set ids per feature).
  std::vector<int32_t> h_sets;
  int32_t ptr = 0;
  std::vector<int32_t> h_sets_ptr {ptr};
  for (auto const& feature : h_features_set) {
    for (auto constraint_id : feature) {
      h_sets.emplace_back(constraint_id);
    }
    // empty set is well defined here.
    ptr += feature.size();
    h_sets_ptr.emplace_back(ptr);
  }
  d_sets_ = h_sets;
  d_sets_ptr_ = h_sets_ptr;
  s_sets_ = dh::ToSpan(d_sets_);
  s_sets_ptr_ = dh::ToSpan(d_sets_ptr_);

  // Scratch bit field used by Split to accumulate a feature's interaction sets.
  d_feature_buffer_storage_.resize(BitField::ComputeStorageSize(n_features));
  feature_buffer_ = dh::ToSpan(d_feature_buffer_storage_);

  // --- Initialize result buffers.
  output_buffer_bits_storage_.resize(BitField::ComputeStorageSize(n_features));
  output_buffer_bits_ = BitField(dh::ToSpan(output_buffer_bits_storage_));
  input_buffer_bits_storage_.resize(BitField::ComputeStorageSize(n_features));
  input_buffer_bits_ = BitField(dh::ToSpan(input_buffer_bits_storage_));
  result_buffer_.resize(n_features);
  s_result_buffer_ = dh::ToSpan(result_buffer_);
}

FeatureInteractionConstraint::FeatureInteractionConstraint(
    tree::TrainParam const& param, int32_t const n_features) :
    has_constraint_{true}, n_sets_{0} {
  this->Configure(param, n_features);
}

// Clear all per-node constraint bit fields so the object can be reused for a
// new tree.  Device storage is kept allocated; only the bits are zeroed.
void FeatureInteractionConstraint::Reset() {
  for (auto& node : node_constraints_storage_) {
    thrust::fill(node.begin(), node.end(), 0);
  }
}

// Clear the two scratch bit fields, one bit position per thread.
__global__ void ClearBuffersKernel(
    BitField result_buffer_output, BitField result_buffer_input) {
  auto tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < result_buffer_output.Size()) {
    result_buffer_output.Clear(tid);
  }
  if (tid < result_buffer_input.Size()) {
    result_buffer_input.Clear(tid);
  }
}

// Host wrapper around ClearBuffersKernel.  Both buffers have the same size
// (checked below), so one grid sized from the input buffer covers both.
void FeatureInteractionConstraint::ClearBuffers() {
  CHECK_EQ(output_buffer_bits_.Size(), input_buffer_bits_.Size());
  CHECK_LE(feature_buffer_.Size(), output_buffer_bits_.Size());
  int constexpr kBlockThreads = 256;
  const int n_grids = static_cast<int>(
      common::DivRoundUp(input_buffer_bits_.Size(), kBlockThreads));
  ClearBuffersKernel<<<n_grids, kBlockThreads>>>(
      output_buffer_bits_, input_buffer_bits_);
}

// Return the indices of all features whose bit is set in node_id's constraint
// bit field, materialized into s_result_buffer_ via thrust::copy_if.  Returns
// an empty span when no constraints are configured.
common::Span<int32_t> FeatureInteractionConstraint::QueryNode(int32_t node_id) {
  if (!has_constraint_) { return {}; }
  CHECK_LT(node_id, s_node_constraints_.size());

  ClearBuffers();

  thrust::counting_iterator<int32_t> begin(0);
  thrust::counting_iterator<int32_t> end(result_buffer_.size());
  auto p_result_buffer = result_buffer_.data();
  BitField node_constraints = s_node_constraints_[node_id];

  // Keep every feature index whose bit is set for this node.
  thrust::device_ptr<int32_t> const out_end = thrust::copy_if(
      thrust::device, begin, end,
      p_result_buffer,
      [=]__device__(int32_t pos) {
        bool res = node_constraints.Check(pos);
        return res;
      });
  size_t const n_available = std::distance(result_buffer_.data(), out_end);

  return {s_result_buffer_.data(), s_result_buffer_.data() + n_available};
}

// Mark each feature in feature_list_input as present in the input bit field.
__global__ void SetInputBufferKernel(common::Span<int32_t> feature_list_input,
                                     BitField result_buffer_input) {
  uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid < feature_list_input.size()) {
    result_buffer_input.Set(feature_list_input[tid]);
  }
}

// output = node_constraints & input, i.e. intersect the candidate feature set
// with the features allowed at this node.
__global__ void QueryFeatureListKernel(BitField node_constraints,
                                       BitField result_buffer_input,
                                       BitField result_buffer_output) {
  result_buffer_output |= node_constraints;
  result_buffer_output &= result_buffer_input;
}

// Filter feature_list down to the features allowed at node `nid`.  The root
// (nid == 0) and the unconstrained case return the input list unchanged.
common::Span<int32_t> FeatureInteractionConstraint::Query(
    common::Span<int32_t> feature_list, int32_t nid) {
  if (!has_constraint_ || nid == 0) {
    return feature_list;
  }

  ClearBuffers();

  BitField node_constraints = s_node_constraints_[nid];
  CHECK_EQ(input_buffer_bits_.Size(), output_buffer_bits_.Size());

  int constexpr kBlockThreads = 256;
  const int n_grids = static_cast<int>(
      common::DivRoundUp(output_buffer_bits_.Size(), kBlockThreads));
  SetInputBufferKernel<<<n_grids, kBlockThreads>>>(feature_list,
                                                   input_buffer_bits_);
  QueryFeatureListKernel<<<n_grids, kBlockThreads>>>(
      node_constraints, input_buffer_bits_, output_buffer_bits_);

  // Compact the surviving bit positions into the result buffer.
  thrust::counting_iterator<int32_t> begin(0);
  thrust::counting_iterator<int32_t> end(result_buffer_.size());

  BitField local_result_buffer = output_buffer_bits_;

  thrust::device_ptr<int32_t> const out_end = thrust::copy_if(
      thrust::device, begin, end,
      result_buffer_.data(),
      [=]__device__(int32_t pos) {
        bool res = local_result_buffer.Check(pos);
        return res;
      });
  size_t const n_available = std::distance(result_buffer_.data(), out_end);

  common::Span<int32_t> result =
      {s_result_buffer_.data(), s_result_buffer_.data() + n_available};
  return result;
}

// Find interaction sets for each feature, then store all features in
// those sets in a buffer.
// 2D launch: x indexes the interaction sets containing `fid`, y indexes the
// features inside each such set.
__global__ void RestoreFeatureListFromSetsKernel(
    BitField feature_buffer,
    int32_t fid,
    common::Span<int32_t> feature_interactions,
    common::Span<int32_t> feature_interactions_ptr,  // of size n interaction set + 1
    common::Span<int32_t> interactions_list,
    common::Span<int32_t> interactions_list_ptr) {
  auto const tid_x = threadIdx.x + blockIdx.x * blockDim.x;
  auto const tid_y = threadIdx.y + blockIdx.y * blockDim.y;
  // painful mapping: fid -> sets related to it -> features related to sets.
  auto const beg = interactions_list_ptr[fid];
  auto const end = interactions_list_ptr[fid+1];
  auto const n_sets = end - beg;
  if (tid_x < n_sets) {
    auto const set_id_pos = beg + tid_x;
    auto const set_id = interactions_list[set_id_pos];
    auto const set_beg = feature_interactions_ptr[set_id];
    auto const set_end = feature_interactions_ptr[set_id + 1];
    auto const feature_pos = set_beg + tid_y;
    if (feature_pos < set_end) {
      feature_buffer.Set(feature_interactions[feature_pos]);
    }
  }
}

// Propagate constraints from a split node to its children:
//   node |= feature's interaction sets; left/right |= node; finally enable the
//   split feature itself on all three.
__global__ void InteractionConstraintSplitKernel(BitField feature,
                                                 int32_t feature_id,
                                                 BitField node,
                                                 BitField left,
                                                 BitField right) {
  auto tid = threadIdx.x + blockDim.x * blockIdx.x;
  // NOTE(review): this guard admits tid == node.Size(); `>=` looks intended.
  // Confirm whether BitField tolerates an index equal to Size().
  if (tid > node.Size()) {
    return;
  }
  // enable constraints from feature
  node |= feature;
  // clear the buffer after use
  if (tid < feature.Size()) {
    feature.Clear(tid);
  }
  // enable constraints from parent
  left |= node;
  right |= node;
  if (tid == feature_id) {
    // enable the split feature, set all of them at last instead of
    // setting it for parent to avoid race.
    node.Set(feature_id);
    left.Set(feature_id);
    right.Set(feature_id);
  }
}

// Called when `node_id` is split on `feature_id`: gathers all features that
// interact with feature_id into feature_buffer_, then merges them (and the
// parent's constraints) into both children's bit fields.
void FeatureInteractionConstraint::Split(
    int32_t node_id, int32_t feature_id, int32_t left_id, int32_t right_id) {
  if (!has_constraint_) { return; }
  CHECK_NE(node_id, left_id)
      << " Split node: " << node_id << " and its left child: "
      << left_id << " cannot be the same.";
  // NOTE(review): this message says "left child" but the check guards the
  // right child id.
  CHECK_NE(node_id, right_id)
      << " Split node: " << node_id << " and its left child: "
      << right_id << " cannot be the same.";
  CHECK_LT(right_id, s_node_constraints_.size());
  CHECK_NE(s_node_constraints_.size(), 0);

  BitField node = s_node_constraints_[node_id];
  BitField left = s_node_constraints_[left_id];
  BitField right = s_node_constraints_[right_id];

  dim3 const block3(16, 64, 1);
  dim3 const grid3(common::DivRoundUp(n_sets_, 16),
                   common::DivRoundUp(s_fconstraints_.size(), 64));
  RestoreFeatureListFromSetsKernel<<<grid3, block3>>>
      (feature_buffer_, feature_id,
       s_fconstraints_, s_fconstraints_ptr_,
       s_sets_, s_sets_ptr_);

  int constexpr kBlockThreads = 256;
  const int n_grids =
      static_cast<int>(common::DivRoundUp(node.Size(), kBlockThreads));
  InteractionConstraintSplitKernel<<<n_grids, kBlockThreads>>>
      (feature_buffer_, feature_id,
       node, left, right);
}

}  // namespace xgboost
7c464a89b4a04e4631034081e5a3a44dfee1af99.hip
// !!! This is a file automatically generated by hipify!!!
// Filtered-backprojection-style tomographic backprojection using a 2D texture
// for bilinear interpolation of the sinogram.  HIP port of the CUDA original.
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define PI 3.14159265358979
//#define INIC -0.7071067811865475244008
#define INIC -1.0

#define TPBXb 32
#define TPBYb 32

// Global texture reference over the sinogram: x = angle index, y = ray index.
// Border addressing returns 0 outside the sinogram; linear filtering gives
// bilinear interpolation between samples.
texture<float, hipTextureType2D, hipReadModeElementType> texSino;

#define CUDA_CHECK_RETURN(value) { \
  hipError_t _m_cudaStat = value; \
  if (_m_cudaStat != hipSuccess) { \
    fprintf(stderr, "Error %s at line %d in file %s\n", \
            hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
    exit(1); \
  } }

// Backprojection kernel.  Each thread integrates a 2x2 patch of output pixels
// (hence the factor-2 index scaling) over all projection angles, fetching the
// sinogram through texSino.
//   image: output of size wdI x wdI
//   delta: pixel pitch in world coordinates starting at INIC
//   dt:    receives 1/dt from the host, i.e. rays per unit of t
//   dth:   angular step in radians
__global__ void backp_kernel(float *image, int wdI, int nrays, int nangles,
                             float delta, float dt, float dth)
{
  int i, j, T;
  // NOTE: k is a float loop counter over angle indices; exact for the angle
  // counts representable in float.
  float t, cs1, cs2, cs3, cs4, k;
  float x, y;
  float cosk, sink;

  i = 2*(blockDim.x * blockIdx.x + threadIdx.x);
  j = 2*(blockDim.y * blockIdx.y + threadIdx.y);

  if ( ((i+1)<wdI) && ((j+1) < wdI) ){
    cs1 = 0; cs2 = 0; cs3 = 0; cs4 = 0;
    for(k=0; k < nangles; k++)
    {
      sincosf(k * dth, &sink, &cosk);
      ///////////////////////////
      x = (float)INIC + i * delta;
      y = (float)INIC + j * delta;
      t = x*cosk + y*sink;
      // T is an int: the (float) cast is redundant, assignment truncates.
      // Out-of-range T is handled by the texture's border addressing mode.
      T = (float)((t + 1)*dt);
      //if(T >= 0 && T <= (nrays-1))
      cs1 = cs1 + tex2D(texSino, k + 0.5f, T + 0.5f);
      //////////////////////////
      x = (float)INIC + (i+1) * delta;
      y = (float)INIC + j * delta;
      t = x*cosk + y*sink;
      T = (float)((t + 1)*dt);
      //if(T >= 0 && T <= (nrays-1))
      cs2 = cs2 + tex2D(texSino, k + 0.5f, T + 0.5f);
      //////////////////////////
      x = (float)INIC + i * delta;
      y = (float)INIC + (j+1) * delta;
      t = x*cosk + y*sink;
      T = (float)((t + 1)*dt);
      //if(T >= 0 && T <= (nrays-1))
      cs3 = cs3 + tex2D(texSino, k + 0.5f, T + 0.5f);
      //////////////////////////
      x = (float)INIC + (i+1) * delta;
      y = (float)INIC + (j+1) * delta;
      t = x*cosk + y*sink;
      T = (float)((t + 1)*dt);
      //if(T >= 0 && T <= (nrays-1))
      cs4 = cs4 + tex2D(texSino, k + 0.5f, T + 0.5f);
    }
    // Output columns are mirrored (wdI-1-i) relative to the input grid.
    image[(j)*wdI + (wdI-1-i)] = (cs1*dth);
    image[(j)*wdI + (wdI-1-i-1)] = (cs2*dth);
    image[(j+1)*wdI + (wdI-1-i)] = (cs3*dth);
    image[(j+1)*wdI + (wdI-1-i-1)] = (cs4*dth);
  }
}

// Stage the sinogram into a hipArray bound to texSino, launch backp_kernel,
// and copy the reconstructed image back to the host.  Kernel time is measured
// with HIP events and printed to stderr.
// NOTE(review): API return codes are not checked even though the
// CUDA_CHECK_RETURN macro is defined above — consider wrapping these calls.
void BackWithTexture(float *image, float *sino, int sizeImage, int nrays,
                     int nangles){
  float* d_output;
  int sizeSino = nrays * nangles * sizeof(float);
  float dt = 2.0/(nrays-1);
  float dth = PI/(nangles);
  float delta = (float) 2*fabsf(INIC)/(sizeImage-1);

  /////// KERNEL EXECUTION TIME TEST
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  float milliseconds = 0;
  hipEventRecord(start);
  //////////////////////////////////

  // Allocate CUDA array in device memory (sinogram matrix)
  // Width is nangles, height is nrays — matching tex2D(texSino, angle, ray)
  // lookups in the kernel.
  hipChannelFormatDesc channelDesc =
      hipCreateChannelDesc(32, 0, 0, 0,hipChannelFormatKindFloat);
  hipArray* cuArray;
  hipMallocArray(&cuArray, &channelDesc, nangles, nrays);

  // Copy to device memory the sinogram matrix
  hipMemcpyToArray(cuArray, 0, 0, sino, sizeSino , hipMemcpyHostToDevice);

  // Set texture parameters
  texSino.addressMode[0] = hipAddressModeBorder;
  texSino.addressMode[1] = hipAddressModeBorder;
  texSino.filterMode = hipFilterModeLinear;
  /*texSino.normalized = true; */

  // Bind the array to the texture reference
  hipBindTextureToArray(texSino, cuArray, channelDesc);

  // Allocate GPU memory for the output image
  hipMalloc(&d_output, sizeof(float) * sizeImage *sizeImage);

  //GRID and BLOCKS SIZE
  // Halved grid: each thread produces a 2x2 pixel patch.
  dim3 threadsPerBlock(TPBXb,TPBYb);
  dim3 grid((sizeImage/threadsPerBlock.x)/2 + 1,
            (sizeImage/threadsPerBlock.y)/2 + 1);

  //KERNEL EXECUTION
  // Note: the kernel's `dt` parameter receives the reciprocal 1/dt.
  hipLaunchKernelGGL(( backp_kernel), dim3(grid), dim3(threadsPerBlock), 0, 0,
      d_output, sizeImage, nrays, nangles, delta, 1/dt, dth);
  hipDeviceSynchronize();

  /////// PRINT KERNEL EXECUTION TIME
  hipEventRecord(stop);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&milliseconds, start, stop);
  fprintf(stderr, "%f ms\n", milliseconds);
  ///////////////////////////////////

  //Copy the output image from device memory to host memory
  hipMemcpy (image , d_output , sizeImage*sizeImage*sizeof(float) ,
             hipMemcpyDeviceToHost);

  hipUnbindTexture(texSino);
  hipFreeArray(cuArray);
  hipFree(d_output);
  hipDeviceReset();
}

// Usage: prog <sinogram-file> <sizeImage> <nrays> <nangles>
// Reads nrays*nangles floats from the file, backprojects, and writes the
// sizeImage x sizeImage result to stdout.
// NOTE(review): argc, fopen, malloc and fscanf results are unchecked — bad
// arguments or a missing file will crash before any diagnostic.
int main(int argc, char *argv[])
{
  int i, j;
  int sizeImage = atoi(argv[2]);
  int nrays = atoi(argv[3]);
  int nangles = atoi(argv[4]);
  FILE *fp=fopen(argv[1], "r");
  float *image;
  float *sino;

  image = (float *)malloc(sizeImage*sizeImage*sizeof(float));
  sino = (float *)malloc(nangles*nrays*sizeof(float));

  for (i = 0; i < nangles*nrays; i++)
    fscanf(fp, "%f", &sino[i]);

  BackWithTexture(image, sino, sizeImage, nrays, nangles);

  for(i=0; i< sizeImage; i++)
  {
    for(j=0; j< sizeImage; j++)
    {
      fprintf(stdout, "%f ", image[sizeImage*(i) + j]);
    }
    fprintf(stdout, "\n");
  }

  free(image);
  free(sino);
  fclose(fp);
}
7c464a89b4a04e4631034081e5a3a44dfee1af99.cu
// Filtered-backprojection-style tomographic backprojection using a 2D texture
// for bilinear interpolation of the sinogram (CUDA original of the HIP port).
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define PI 3.14159265358979
//#define INIC -0.7071067811865475244008
#define INIC -1.0

#define TPBXb 32
#define TPBYb 32

// Global texture reference over the sinogram: x = angle index, y = ray index.
// NOTE(review): the texture *reference* API is legacy (removed in CUDA 12);
// migrating to texture objects is needed for newer toolkits — confirm target
// toolkit version.
texture<float, cudaTextureType2D, cudaReadModeElementType> texSino;

#define CUDA_CHECK_RETURN(value) { \
  cudaError_t _m_cudaStat = value; \
  if (_m_cudaStat != cudaSuccess) { \
    fprintf(stderr, "Error %s at line %d in file %s\n", \
            cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
    exit(1); \
  } }

// Backprojection kernel.  Each thread integrates a 2x2 patch of output pixels
// (hence the factor-2 index scaling) over all projection angles, fetching the
// sinogram through texSino.
//   image: output of size wdI x wdI
//   delta: pixel pitch in world coordinates starting at INIC
//   dt:    receives 1/dt from the host, i.e. rays per unit of t
//   dth:   angular step in radians
__global__ void backp_kernel(float *image, int wdI, int nrays, int nangles,
                             float delta, float dt, float dth)
{
  int i, j, T;
  // NOTE: k is a float loop counter over angle indices; exact for the angle
  // counts representable in float.
  float t, cs1, cs2, cs3, cs4, k;
  float x, y;
  float cosk, sink;

  i = 2*(blockDim.x * blockIdx.x + threadIdx.x);
  j = 2*(blockDim.y * blockIdx.y + threadIdx.y);

  if ( ((i+1)<wdI) && ((j+1) < wdI) ){
    cs1 = 0; cs2 = 0; cs3 = 0; cs4 = 0;
    for(k=0; k < nangles; k++)
    {
      sincosf(k * dth, &sink, &cosk);
      ///////////////////////////
      x = (float)INIC + i * delta;
      y = (float)INIC + j * delta;
      t = x*cosk + y*sink;
      // T is an int: the (float) cast is redundant, assignment truncates.
      // Out-of-range T is handled by the texture's border addressing mode.
      T = (float)((t + 1)*dt);
      //if(T >= 0 && T <= (nrays-1))
      cs1 = cs1 + tex2D(texSino, k + 0.5f, T + 0.5f);
      //////////////////////////
      x = (float)INIC + (i+1) * delta;
      y = (float)INIC + j * delta;
      t = x*cosk + y*sink;
      T = (float)((t + 1)*dt);
      //if(T >= 0 && T <= (nrays-1))
      cs2 = cs2 + tex2D(texSino, k + 0.5f, T + 0.5f);
      //////////////////////////
      x = (float)INIC + i * delta;
      y = (float)INIC + (j+1) * delta;
      t = x*cosk + y*sink;
      T = (float)((t + 1)*dt);
      //if(T >= 0 && T <= (nrays-1))
      cs3 = cs3 + tex2D(texSino, k + 0.5f, T + 0.5f);
      //////////////////////////
      x = (float)INIC + (i+1) * delta;
      y = (float)INIC + (j+1) * delta;
      t = x*cosk + y*sink;
      T = (float)((t + 1)*dt);
      //if(T >= 0 && T <= (nrays-1))
      cs4 = cs4 + tex2D(texSino, k + 0.5f, T + 0.5f);
    }
    // Output columns are mirrored (wdI-1-i) relative to the input grid.
    image[(j)*wdI + (wdI-1-i)] = (cs1*dth);
    image[(j)*wdI + (wdI-1-i-1)] = (cs2*dth);
    image[(j+1)*wdI + (wdI-1-i)] = (cs3*dth);
    image[(j+1)*wdI + (wdI-1-i-1)] = (cs4*dth);
  }
}

// Stage the sinogram into a cudaArray bound to texSino, launch backp_kernel,
// and copy the reconstructed image back to the host.  Kernel time is measured
// with CUDA events and printed to stderr.
// NOTE(review): API return codes are not checked even though the
// CUDA_CHECK_RETURN macro is defined above — consider wrapping these calls.
void BackWithTexture(float *image, float *sino, int sizeImage, int nrays,
                     int nangles){
  float* d_output;
  int sizeSino = nrays * nangles * sizeof(float);
  float dt = 2.0/(nrays-1);
  float dth = PI/(nangles);
  float delta = (float) 2*fabsf(INIC)/(sizeImage-1);

  /////// KERNEL EXECUTION TIME TEST
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  float milliseconds = 0;
  cudaEventRecord(start);
  //////////////////////////////////

  // Allocate CUDA array in device memory (sinogram matrix)
  // Width is nangles, height is nrays — matching tex2D(texSino, angle, ray)
  // lookups in the kernel.
  cudaChannelFormatDesc channelDesc =
      cudaCreateChannelDesc(32, 0, 0, 0,cudaChannelFormatKindFloat);
  cudaArray* cuArray;
  cudaMallocArray(&cuArray, &channelDesc, nangles, nrays);

  // Copy to device memory the sinogram matrix
  cudaMemcpyToArray(cuArray, 0, 0, sino, sizeSino , cudaMemcpyHostToDevice);

  // Set texture parameters
  texSino.addressMode[0] = cudaAddressModeBorder;
  texSino.addressMode[1] = cudaAddressModeBorder;
  texSino.filterMode = cudaFilterModeLinear;
  /*texSino.normalized = true; */

  // Bind the array to the texture reference
  cudaBindTextureToArray(texSino, cuArray, channelDesc);

  // Allocate GPU memory for the output image
  cudaMalloc(&d_output, sizeof(float) * sizeImage *sizeImage);

  //GRID and BLOCKS SIZE
  // Halved grid: each thread produces a 2x2 pixel patch.
  dim3 threadsPerBlock(TPBXb,TPBYb);
  dim3 grid((sizeImage/threadsPerBlock.x)/2 + 1,
            (sizeImage/threadsPerBlock.y)/2 + 1);

  //KERNEL EXECUTION
  // Note: the kernel's `dt` parameter receives the reciprocal 1/dt.
  backp_kernel<<<grid, threadsPerBlock>>>(d_output, sizeImage, nrays, nangles,
                                          delta, 1/dt, dth);
  cudaDeviceSynchronize();

  /////// PRINT KERNEL EXECUTION TIME
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&milliseconds, start, stop);
  fprintf(stderr, "%f ms\n", milliseconds);
  ///////////////////////////////////

  //Copy the output image from device memory to host memory
  cudaMemcpy (image , d_output , sizeImage*sizeImage*sizeof(float) ,
              cudaMemcpyDeviceToHost);

  cudaUnbindTexture(texSino);
  cudaFreeArray(cuArray);
  cudaFree(d_output);
  cudaDeviceReset();
}

// Usage: prog <sinogram-file> <sizeImage> <nrays> <nangles>
// Reads nrays*nangles floats from the file, backprojects, and writes the
// sizeImage x sizeImage result to stdout.
// NOTE(review): argc, fopen, malloc and fscanf results are unchecked — bad
// arguments or a missing file will crash before any diagnostic.
int main(int argc, char *argv[])
{
  int i, j;
  int sizeImage = atoi(argv[2]);
  int nrays = atoi(argv[3]);
  int nangles = atoi(argv[4]);
  FILE *fp=fopen(argv[1], "r");
  float *image;
  float *sino;

  image = (float *)malloc(sizeImage*sizeImage*sizeof(float));
  sino = (float *)malloc(nangles*nrays*sizeof(float));

  for (i = 0; i < nangles*nrays; i++)
    fscanf(fp, "%f", &sino[i]);

  BackWithTexture(image, sino, sizeImage, nrays, nangles);

  for(i=0; i< sizeImage; i++)
  {
    for(j=0; j< sizeImage; j++)
    {
      fprintf(stdout, "%f ", image[sizeImage*(i) + j]);
    }
    fprintf(stdout, "\n");
  }

  free(image);
  free(sino);
  fclose(fp);
}
6d9d81efa1cd44c0d5eae55dea6ed4ec6ce4c902.hip
// !!! This is a file automatically generated by hipify!!!
// Array-reversal sample (HIP port): reverses a 1 MB int array on the GPU by
// mirroring both the block index and the thread index within each block.
#include "hip/hip_runtime.h"
/*
 * Copyright 1993-2008 NVIDIA Corporation.  All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws.  Users and possessors of this source code
 * are hereby granted a nonexclusive, royalty-free license to use this code
 * in individual and commercial software.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users.  This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 *
 * Any use of this source code in individual and commercial software must
 * include, in the user documentation and internal comments to the code,
 * the above Disclaimer and U.S. Government End Users Notice.
 */

// includes, system
#include <stdio.h>
#include <assert.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);

// Part3: implement the kernel
// Reverse d_in into d_out: block b writes into mirrored block (gridDim.x-1-b),
// and thread t writes into the mirrored slot of that block, so element i ends
// up at position N-1-i.  Assumes the grid exactly covers the array (the host
// launches dimA / numThreadsPerBlock full blocks), hence no bounds check.
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
    int inOffset  = blockDim.x * blockIdx.x;
    int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    int in  = inOffset + threadIdx.x;
    int out = outOffset + (blockDim.x - 1 - threadIdx.x);
    d_out[out] = d_in[in];
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // pointer for host memory and size
    int *h_a;
    int dimA = 256 * 1024; // 256K elements (1MB total)

    // pointer for device memory
    int *d_b, *d_a;

    // define grid and block size
    int numThreadsPerBlock = 256;

    // Part 1: compute number of blocks needed based on array size and desired block size
    // dimA (256K) divides evenly by 256, so the grid covers the array exactly.
    int numBlocks = dimA / numThreadsPerBlock;

    // allocate host and device memory
    // NOTE(review): hipMalloc/malloc results are unchecked here; a failure
    // would surface later as an illegal access.
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    hipMalloc( (void **) &d_a, memSize );
    hipMalloc( (void **) &d_b, memSize );

    // Initialize input array on host
    for (int i = 0; i < dimA; ++i)
    {
        h_a[i] = i;
    }

    // Copy host array to device array
    hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice );

    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    hipLaunchKernelGGL(( reverseArrayBlock), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_b, d_a );

    // block until the device has completed
    hipDeviceSynchronize();

    // check if kernel execution generated an error
    // Check for any CUDA errors
    checkCUDAError("kernel invocation");

    // device to host copy
    hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost );

    // Check for any CUDA errors
    checkCUDAError("memcpy");

    // verify the data returned to the host is correct
    for (int i = 0; i < dimA; i++)
    {
        assert(h_a[i] == dimA - 1 - i );
    }

    // free device memory
    hipFree(d_a);
    hipFree(d_b);

    // free host memory
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors.  Good work!
    printf("Correct!\n");

    return 0;
}

// Print the last HIP runtime error (if any) tagged with `msg` and exit.
void checkCUDAError(const char *msg)
{
    hipError_t err = hipGetLastError();
    if( hipSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
6d9d81efa1cd44c0d5eae55dea6ed4ec6ce4c902.cu
/*
 * Copyright 1993-2008 NVIDIA Corporation.  All rights reserved.
 *
 * NOTICE TO USER:
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and
 * international Copyright laws.  Users and possessors of this source code
 * are hereby granted a nonexclusive, royalty-free license to use this code
 * in individual and commercial software.
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
 * CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
 * IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
 * OR PERFORMANCE OF THIS SOURCE CODE.
 *
 * U.S. Government End Users.  This source code is a "commercial item" as
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
 * "commercial computer software" and "commercial computer software
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
 * and is provided to the U.S. Government only as a commercial end item.
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
 * source code with only those rights set forth herein.
 *
 * Any use of this source code in individual and commercial software must
 * include, in the user documentation and internal comments to the code,
 * the above Disclaimer and U.S. Government End Users Notice.
 */

// includes, system
#include <stdio.h>
#include <assert.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);

// Part3: implement the kernel
// Reverse d_in into d_out: block b writes into mirrored block (gridDim.x-1-b),
// and thread t writes into the mirrored slot of that block, so element i ends
// up at position N-1-i.  Assumes the grid exactly covers the array (the host
// launches dimA / numThreadsPerBlock full blocks), hence no bounds check.
__global__ void reverseArrayBlock(int *d_out, int *d_in)
{
    int inOffset  = blockDim.x * blockIdx.x;
    int outOffset = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    int in  = inOffset + threadIdx.x;
    int out = outOffset + (blockDim.x - 1 - threadIdx.x);
    d_out[out] = d_in[in];
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // pointer for host memory and size
    int *h_a;
    int dimA = 256 * 1024; // 256K elements (1MB total)

    // pointer for device memory
    int *d_b, *d_a;

    // define grid and block size
    int numThreadsPerBlock = 256;

    // Part 1: compute number of blocks needed based on array size and desired block size
    // dimA (256K) divides evenly by 256, so the grid covers the array exactly.
    int numBlocks = dimA / numThreadsPerBlock;

    // allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc( (void **) &d_a, memSize );
    cudaMalloc( (void **) &d_b, memSize );
    // Fix: surface allocation failures immediately instead of as a later
    // illegal-access error (uses the file's existing helper).
    checkCUDAError("cudaMalloc");

    // Initialize input array on host
    for (int i = 0; i < dimA; ++i)
    {
        h_a[i] = i;
    }

    // Copy host array to device array
    cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );

    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    reverseArrayBlock<<< dimGrid, dimBlock >>>( d_b, d_a );

    // block until the device has completed
    // Fix: cudaThreadSynchronize() has been deprecated since CUDA 4.0 and
    // removed in recent toolkits; cudaDeviceSynchronize() is the direct
    // replacement (and matches the HIP port of this sample).
    cudaDeviceSynchronize();

    // check if kernel execution generated an error
    // Check for any CUDA errors
    checkCUDAError("kernel invocation");

    // device to host copy
    cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );

    // Check for any CUDA errors
    checkCUDAError("memcpy");

    // verify the data returned to the host is correct
    for (int i = 0; i < dimA; i++)
    {
        assert(h_a[i] == dimA - 1 - i );
    }

    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);

    // free host memory
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors.  Good work!
    printf("Correct!\n");

    return 0;
}

// Print the last CUDA runtime error (if any) tagged with `msg` and exit.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
547ad6358201c775f7c82f537efa752033f7b455.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "star3d3r-32x32-2-128_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 20; const AN5D_TYPE __side3Len = 20; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; 
double __reg_0_2; double __reg_0_3; double __reg_0_4; double __reg_0_5; double __reg_0_6; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_1_5; double __reg_1_6; __shared__ double __d_sb_double[__blockSize * 2]; double *__d_sb = __d_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g) do { __rn0 = (((((((((((((((((((0.25000f * (__REGREF(__d, 0, 0))) + (0.04276f * (__SBREF(__d_sb, 0, -3)))) + (0.04176f * (__SBREF(__d_sb, 0, -2)))) + (0.04076f * (__SBREF(__d_sb, 0, -1)))) + (0.04046f * (__SBREF(__d_sb, 0, 1)))) + (0.04146f * (__SBREF(__d_sb, 0, 2)))) + (0.04246f * (__SBREF(__d_sb, 0, 3)))) + (0.04096f * (__REGREF(__c, 0, 0)))) + (0.04066f * (__REGREF(__e, 0, 0)))) + (0.04086f * (__SBREF(__d_sb, -1, 0)))) + (0.04056f * 
(__SBREF(__d_sb, 1, 0)))) + (0.04196f * (__REGREF(__b, 0, 0)))) + (0.04166f * (__REGREF(__f, 0, 0)))) + (0.04186f * (__SBREF(__d_sb, -2, 0)))) + (0.04156f * (__SBREF(__d_sb, 2, 0)))) + (0.04296f * (__REGREF(__a, 0, 0)))) + (0.04266f * (__REGREF(__g, 0, 0)))) + (0.04286f * (__SBREF(__d_sb, -3, 0)))) + (0.04256f * (__SBREF(__d_sb, 3, 0)))); } while (0) #define __DB_SWITCH() do { __d_sb = &__d_sb_double[(__d_sb == __d_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e, f, g) do { __DB_SWITCH(); __d_sb[__tid] = d; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4, reg5, reg6) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4, reg5, reg6); else out = reg3; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_1_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __LOAD(__reg_0_5, 5); __LOAD(__reg_0_6, 6); __CALC1(__reg_1_3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __LOAD(__reg_0_0, 7); __CALC1(__reg_1_4, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, 8); __CALC1(__reg_1_5, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 9); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __LOAD(__reg_0_3, 10); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __LOAD(__reg_0_4, 11); 
__CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(5, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __LOAD(__reg_0_5, 12); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __LOAD(__reg_0_5, 5); __LOAD(__reg_0_6, 6); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __LOAD(__reg_0_0, 7); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, 8); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 9); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 10); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __LOAD(__reg_0_4, 11); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_5, 12); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); __DB_SWITCH(); __syncthreads(); } __d_sb = __d_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0_6, __h); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h 
- 6, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 6, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 6, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 6, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_5, __h); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(__h - 6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 4, __reg_1_6, __reg_1_0, 
__reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_0); __STORE(__h - 1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - 
__side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_0); __STORE(__h - 1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_0, __reg_0_1); __STORE(__h + 0, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, 
__reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __LOAD(__reg_0_3, __h + 4); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __STORE(__h - 1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_0_1); __STORE(__h + 0, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h + 1, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_0_1, __reg_0_2, __reg_0_3); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __LOAD(__reg_0_3, __h + 4); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __LOAD(__reg_0_4, __h + 5); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 1, __reg_1_2, __reg_1_3, __reg_1_4, 
__reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __STORE(__h + 0, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h + 1, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h + 2, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); } } else { for (__h = 13; __h <= __side1LenOl - 7;) { __LOAD(__reg_0_6, __h); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 6, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 6, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 6, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 6, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_5, __h); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(__h - 6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_6, 
__h); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 6, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 6, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 6, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 6, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_5, __h); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(__h - 6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 
- 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; double __reg_0_5; double __reg_0_6; __shared__ double __d_sb_double[__blockSize * 2]; double *__d_sb = __d_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + 
__c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g) do { __rn0 = (((((((((((((((((((0.25000f * (__REGREF(__d, 0, 0))) + (0.04276f * (__SBREF(__d_sb, 0, -3)))) + (0.04176f * (__SBREF(__d_sb, 0, -2)))) + (0.04076f * (__SBREF(__d_sb, 0, -1)))) + (0.04046f * (__SBREF(__d_sb, 0, 1)))) + (0.04146f * (__SBREF(__d_sb, 0, 2)))) + (0.04246f * (__SBREF(__d_sb, 0, 3)))) + (0.04096f * (__REGREF(__c, 0, 0)))) + (0.04066f * (__REGREF(__e, 0, 0)))) + (0.04086f * (__SBREF(__d_sb, -1, 0)))) + (0.04056f * (__SBREF(__d_sb, 1, 0)))) + (0.04196f * (__REGREF(__b, 0, 0)))) + (0.04166f * (__REGREF(__f, 0, 0)))) + (0.04186f * (__SBREF(__d_sb, -2, 0)))) + (0.04156f * (__SBREF(__d_sb, 2, 0)))) + (0.04296f * (__REGREF(__a, 0, 0)))) + (0.04266f * (__REGREF(__g, 0, 0)))) + (0.04286f * (__SBREF(__d_sb, -3, 0)))) + (0.04256f * (__SBREF(__d_sb, 3, 0)))); } while (0) #define __DB_SWITCH() do { __d_sb = &__d_sb_double[(__d_sb == __d_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e, f, g) do { __DB_SWITCH(); __d_sb[__tid] = d; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __LOAD(__reg_0_5, 5); __LOAD(__reg_0_6, 6); __STORE(3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __LOAD(__reg_0_5, 5); __LOAD(__reg_0_6, 6); __STORE(3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); } __d_sb = __d_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0_0, __h); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 3, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 3, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __LOAD(__reg_0_5, __h); __STORE(__h - 3, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __h++; __LOAD(__reg_0_6, __h); __STORE(__h - 3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == 
__c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h - 1, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h - 1, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h - 1, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); 
__LOAD(__reg_0_3, __h + 3); __STORE(__h + 0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __LOAD(__reg_0_4, __h + 4); __STORE(__h + 1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h - 1, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __LOAD(__reg_0_4, __h + 4); __STORE(__h + 1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_5, __h + 5); __STORE(__h + 2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); } } else { for (__h = 7; __h <= __side1LenOl - 7;) { __LOAD(__reg_0_0, __h); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 3, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 3, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __LOAD(__reg_0_5, __h); __STORE(__h - 3, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __h++; __LOAD(__reg_0_6, __h); __STORE(__h - 3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __h++; 
__DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 3, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 3, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_5, __h); __STORE(__h - 3, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_6, __h); __STORE(__h - 3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __h++; } }
547ad6358201c775f7c82f537efa752033f7b455.cu
#include "star3d3r-32x32-2-128_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 20; const AN5D_TYPE __side3Len = 20; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; double __reg_0_5; double __reg_0_6; 
double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_1_5; double __reg_1_6; __shared__ double __d_sb_double[__blockSize * 2]; double *__d_sb = __d_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g) do { __rn0 = (((((((((((((((((((0.25000f * (__REGREF(__d, 0, 0))) + (0.04276f * (__SBREF(__d_sb, 0, -3)))) + (0.04176f * (__SBREF(__d_sb, 0, -2)))) + (0.04076f * (__SBREF(__d_sb, 0, -1)))) + (0.04046f * (__SBREF(__d_sb, 0, 1)))) + (0.04146f * (__SBREF(__d_sb, 0, 2)))) + (0.04246f * (__SBREF(__d_sb, 0, 3)))) + (0.04096f * (__REGREF(__c, 0, 0)))) + (0.04066f * (__REGREF(__e, 0, 0)))) + (0.04086f * (__SBREF(__d_sb, -1, 0)))) + (0.04056f * (__SBREF(__d_sb, 1, 0)))) + (0.04196f * (__REGREF(__b, 0, 0)))) + (0.04166f * 
(__REGREF(__f, 0, 0)))) + (0.04186f * (__SBREF(__d_sb, -2, 0)))) + (0.04156f * (__SBREF(__d_sb, 2, 0)))) + (0.04296f * (__REGREF(__a, 0, 0)))) + (0.04266f * (__REGREF(__g, 0, 0)))) + (0.04286f * (__SBREF(__d_sb, -3, 0)))) + (0.04256f * (__SBREF(__d_sb, 3, 0)))); } while (0) #define __DB_SWITCH() do { __d_sb = &__d_sb_double[(__d_sb == __d_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e, f, g) do { __DB_SWITCH(); __d_sb[__tid] = d; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4, reg5, reg6) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4, reg5, reg6); else out = reg3; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_1_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __LOAD(__reg_0_5, 5); __LOAD(__reg_0_6, 6); __CALC1(__reg_1_3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __LOAD(__reg_0_0, 7); __CALC1(__reg_1_4, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, 8); __CALC1(__reg_1_5, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 9); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __LOAD(__reg_0_3, 10); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __LOAD(__reg_0_4, 11); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, 
__reg_0_3, __reg_0_4); __STORE(5, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __LOAD(__reg_0_5, 12); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __LOAD(__reg_0_5, 5); __LOAD(__reg_0_6, 6); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __LOAD(__reg_0_0, 7); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, 8); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 9); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 10); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __LOAD(__reg_0_4, 11); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_5, 12); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); __DB_SWITCH(); __syncthreads(); } __d_sb = __d_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0_6, __h); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 6, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, 
__reg_1_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 6, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 6, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 6, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_5, __h); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(__h - 6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4, __reg_0_5); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5); __STORE(__h - 3, 
__reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_5, __reg_0_6); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_5, __reg_0_6, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_0); __STORE(__h - 1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_0_6, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); 
__CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_0); __STORE(__h - 1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_0, __reg_0_1); __STORE(__h + 0, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); 
__STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __LOAD(__reg_0_3, __h + 4); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __STORE(__h - 1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_0_1); __STORE(__h + 0, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h + 1, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_0_1, __reg_0_2, __reg_0_3); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_6, __h + 0); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 5, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 3, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __LOAD(__reg_0_3, __h + 4); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __LOAD(__reg_0_4, __h + 5); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); 
__STORE(__h + 0, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h + 1, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h + 2, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); } } else { for (__h = 13; __h <= __side1LenOl - 7;) { __LOAD(__reg_0_6, __h); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 6, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 6, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 6, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 6, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_5, __h); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(__h - 6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_6, __h); __CALC1(__reg_1_3, __reg_0_0, __reg_0_1, 
__reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __STORE(__h - 6, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_4, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __STORE(__h - 6, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_5, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __STORE(__h - 6, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_6, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 6, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 6, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 6, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_5, __h); __CALC1(__reg_1_2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __STORE(__h - 6, __reg_1_3, __reg_1_4, __reg_1_5, __reg_1_6, __reg_1_0, __reg_1_1, __reg_1_2); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 3 - 3); const AN5D_TYPE __c1Pad = (3); #define 
__c1 c1 const AN5D_TYPE __c2Len = (dimsize - 3 - 3); const AN5D_TYPE __c2Pad = (3); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 3 - 3); const AN5D_TYPE __c3Pad = (3); #define __c3 c3 const AN5D_TYPE __halo1 = 3; const AN5D_TYPE __halo2 = 3; const AN5D_TYPE __halo3 = 3; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 26; const AN5D_TYPE __side3Len = 26; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; double __reg_0_5; double __reg_0_6; __shared__ double __d_sb_double[__blockSize * 2]; double *__d_sb = __d_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = 
__updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e, __f, __g) do { __rn0 = (((((((((((((((((((0.25000f * (__REGREF(__d, 0, 0))) + (0.04276f * (__SBREF(__d_sb, 0, -3)))) + (0.04176f * (__SBREF(__d_sb, 0, -2)))) + (0.04076f * (__SBREF(__d_sb, 0, -1)))) + (0.04046f * (__SBREF(__d_sb, 0, 1)))) + (0.04146f * (__SBREF(__d_sb, 0, 2)))) + (0.04246f * (__SBREF(__d_sb, 0, 3)))) + (0.04096f * (__REGREF(__c, 0, 0)))) + (0.04066f * (__REGREF(__e, 0, 0)))) + (0.04086f * (__SBREF(__d_sb, -1, 0)))) + (0.04056f * (__SBREF(__d_sb, 1, 0)))) + (0.04196f * (__REGREF(__b, 0, 0)))) + (0.04166f * (__REGREF(__f, 0, 0)))) + (0.04186f * (__SBREF(__d_sb, -2, 0)))) + (0.04156f * (__SBREF(__d_sb, 2, 0)))) + (0.04296f * (__REGREF(__a, 0, 0)))) + (0.04266f * (__REGREF(__g, 0, 0)))) + (0.04286f * (__SBREF(__d_sb, -3, 0)))) + (0.04256f * (__SBREF(__d_sb, 3, 0)))); } while (0) #define __DB_SWITCH() do { __d_sb = &__d_sb_double[(__d_sb == __d_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e, f, g) do { __DB_SWITCH(); __d_sb[__tid] = d; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4, reg5, reg6) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4, reg5, reg6); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4, reg5, reg6); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __LOAD(__reg_0_5, 5); __LOAD(__reg_0_6, 6); __STORE(3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __LOAD(__reg_0_5, 5); __LOAD(__reg_0_6, 6); __STORE(3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); } __d_sb = __d_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;) { __LOAD(__reg_0_0, __h); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 3, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 3, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __LOAD(__reg_0_5, __h); __STORE(__h - 3, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __h++; __LOAD(__reg_0_6, __h); __STORE(__h - 3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == 
__c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h - 1, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h - 1, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h - 1, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); 
__LOAD(__reg_0_3, __h + 3); __STORE(__h + 0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __LOAD(__reg_0_4, __h + 4); __STORE(__h + 1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h - 1, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 0, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __LOAD(__reg_0_4, __h + 4); __STORE(__h + 1, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_5, __h + 5); __STORE(__h + 2, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); } } else { for (__h = 7; __h <= __side1LenOl - 7;) { __LOAD(__reg_0_0, __h); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 3, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 3, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __LOAD(__reg_0_5, __h); __STORE(__h - 3, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __h++; __LOAD(__reg_0_6, __h); __STORE(__h - 3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __h++; 
__DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 3, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 3, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 3, __reg_0_4, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 3, __reg_0_5, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_5, __h); __STORE(__h - 3, __reg_0_6, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_6, __h); __STORE(__h - 3, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_5, __reg_0_6); __h++; } }
cef22a57b8a51293ca52112e52eab683c571d1d0.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> __global__ void what_is_my_id(unsigned int * const block, unsigned int * const thread, unsigned int * const warp, unsigned int * const calc_thread) { // Thread_ID is block_index * block_size + thread_index inside this block const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; block[idx] = blockIdx.x; thread[idx] = threadIdx.x; warp[idx] = threadIdx.x / warpSize; // Use build in variable warpSize=32 to calculate actual warp calc_thread[idx] = idx; } #define ARRAY_SIZE 128 #define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE)) unsigned int cpu_block[ARRAY_SIZE]; unsigned int cpu_thread[ARRAY_SIZE]; unsigned int cpu_warp[ARRAY_SIZE]; unsigned int cpu_calc_thread[ARRAY_SIZE]; int main(void){ // Total threads: 64 * 2 = 128 const unsigned int num_blocks = 2; const unsigned int num_threads = 64; // Declare pointers for GPU based params unsigned int * gpu_block; unsigned int * gpu_thread; unsigned int * gpu_warp; unsigned int * gpu_calc_thread; // Declaration of loop iterator unsigned int i; // Allocate four arrays on GPU hipMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES); hipMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES); hipMalloc((void **)&gpu_warp, ARRAY_SIZE_IN_BYTES); hipMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES); // Execute kernel hipLaunchKernelGGL(( what_is_my_id), dim3(num_blocks), dim3(num_threads), 0, 0, gpu_block, gpu_thread, gpu_warp, gpu_calc_thread); hipMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost); hipMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost); hipMemcpy(cpu_warp, gpu_warp, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost); hipMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost); hipFree(gpu_block); hipFree(gpu_thread); hipFree(gpu_warp); hipFree(gpu_calc_thread); for(i=0;i<ARRAY_SIZE;i++){ 
printf("Calculcated thread: %3u | Block: %3u | Warp: %3u | Thread: %3u\n", cpu_calc_thread[i], cpu_block[i], cpu_warp[i], cpu_thread[i]); } }
cef22a57b8a51293ca52112e52eab683c571d1d0.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> __global__ void what_is_my_id(unsigned int * const block, unsigned int * const thread, unsigned int * const warp, unsigned int * const calc_thread) { // Thread_ID is block_index * block_size + thread_index inside this block const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x; block[idx] = blockIdx.x; thread[idx] = threadIdx.x; warp[idx] = threadIdx.x / warpSize; // Use build in variable warpSize=32 to calculate actual warp calc_thread[idx] = idx; } #define ARRAY_SIZE 128 #define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE)) unsigned int cpu_block[ARRAY_SIZE]; unsigned int cpu_thread[ARRAY_SIZE]; unsigned int cpu_warp[ARRAY_SIZE]; unsigned int cpu_calc_thread[ARRAY_SIZE]; int main(void){ // Total threads: 64 * 2 = 128 const unsigned int num_blocks = 2; const unsigned int num_threads = 64; // Declare pointers for GPU based params unsigned int * gpu_block; unsigned int * gpu_thread; unsigned int * gpu_warp; unsigned int * gpu_calc_thread; // Declaration of loop iterator unsigned int i; // Allocate four arrays on GPU cudaMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES); cudaMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES); cudaMalloc((void **)&gpu_warp, ARRAY_SIZE_IN_BYTES); cudaMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES); // Execute kernel what_is_my_id<<<num_blocks, num_threads>>>(gpu_block, gpu_thread, gpu_warp, gpu_calc_thread); cudaMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_warp, gpu_warp, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); cudaFree(gpu_block); cudaFree(gpu_thread); cudaFree(gpu_warp); cudaFree(gpu_calc_thread); for(i=0;i<ARRAY_SIZE;i++){ printf("Calculcated thread: %3u | Block: %3u | Warp: %3u | Thread: %3u\n", cpu_calc_thread[i], cpu_block[i], 
cpu_warp[i], cpu_thread[i]); } }
820e1f9da5e47745ba2ea5776e68163d3fe3ced1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define VERBOSE 0 __global__ void add(int *a, int *b, int *c, int nElems) { // note that add has no variables in its scope, instead it reads and // modifies variables that live elsewhere. int iElem = blockDim.x * blockIdx.x + threadIdx.x; if (iElem < nElems) { c[iElem] = a[iElem] + b[iElem]; } else { // we're indexing beyond the end of the array; do nothing } } void irand(int *arr, int nElems) { int iElem; for (iElem = 0; iElem < nElems; iElem+=1) { int r = rand() % 10; arr[iElem] = r; } } void zeros(int *arr, int nElems) { int iElem; for (iElem = 0; iElem < nElems; iElem+=1) { arr[iElem] = 0; } } void printarr(const char* formatString, int *arr, int nElems) { int iElem; for (iElem = 0; iElem < nElems; iElem += 1) { printf(formatString, iElem, arr[iElem]); } } int main(void) { // define the length of the vectors we're adding int nElems = 2048*2048; // define how many bytes is an integer (on this system) int nBytes = nElems * sizeof(int); // declare pointers to three arrays of integers in the host's memory space int *h_a; int *h_b; int *h_c; // preallocate the memory space for h_a, h_b, and h_c h_a = (int *)malloc(nBytes); h_b = (int *)malloc(nBytes); h_c = (int *)malloc(nBytes); // initialize h_a, h_b with random ints, initialize h_c with zeros. irand(h_a, nElems); irand(h_b, nElems); zeros(h_c, nElems); if (VERBOSE == 1) { // print the arrays to see what's going on printf("\n---- before ----\n"); printarr("h_a[%d] = %d\n", h_a, nElems); printf("\n"); printarr("h_b[%d] = %d\n", h_b, nElems); printf("\n"); printarr("h_c[%d] = %d\n", h_c, nElems); printf("\n"); } // declare pointers to three arrays of integers in the device's memory space // (the pointer itself lives in host memory, while its value points to an // address in the device memory?) 
int *d_a; int *d_b; int *d_c; // allocate nbytes memory on the device for each of the d_a, d_b, // and d_c variables hipMalloc((void **)&d_a, nBytes); hipMalloc((void **)&d_b, nBytes); hipMalloc((void **)&d_c, nBytes); // copy nBytes of memory located at &h_a on the host to variable d_a // on the device (then do the same for &h_b, d_b) hipMemcpy(d_a, h_a, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, nBytes, hipMemcpyHostToDevice); int nBlocks; int nThreadsPerBlock = 512; if (nElems < nThreadsPerBlock) { // we need at least 1 block nBlocks = 1; } else { nBlocks = nElems / nThreadsPerBlock; } // call the integer add kernel with multiple blocks and threads, pass // it the values of d_a, d_b, as well as the (uninitialized) value // of d_c hipLaunchKernelGGL(( add), dim3(nBlocks),dim3(nThreadsPerBlock), 0, 0, d_a, d_b, d_c, nElems); // copy the results from the device back to the host hipMemcpy(h_c, d_c, nBytes, hipMemcpyDeviceToHost); if (VERBOSE == 1) { // print the arrays to see what's going on printf("---- after ----\n"); printarr("h_a[%d] = %d\n", h_a, nElems); printf("\n"); printarr("h_b[%d] = %d\n", h_b, nElems); printf("\n"); printarr("h_c[%d] = %d\n", h_c, nElems); printf("\n"); } // free up the memory on the device that we hipMalloc'ed earlier. hipFree(d_a); hipFree(d_b); hipFree(d_c); // free up the memory on the host that we malloc'ed earlier. free(h_a); free(h_b); free(h_c); return 0; }
820e1f9da5e47745ba2ea5776e68163d3fe3ced1.cu
#include <stdio.h> #include <stdlib.h> #define VERBOSE 0 __global__ void add(int *a, int *b, int *c, int nElems) { // note that add has no variables in its scope, instead it reads and // modifies variables that live elsewhere. int iElem = blockDim.x * blockIdx.x + threadIdx.x; if (iElem < nElems) { c[iElem] = a[iElem] + b[iElem]; } else { // we're indexing beyond the end of the array; do nothing } } void irand(int *arr, int nElems) { int iElem; for (iElem = 0; iElem < nElems; iElem+=1) { int r = rand() % 10; arr[iElem] = r; } } void zeros(int *arr, int nElems) { int iElem; for (iElem = 0; iElem < nElems; iElem+=1) { arr[iElem] = 0; } } void printarr(const char* formatString, int *arr, int nElems) { int iElem; for (iElem = 0; iElem < nElems; iElem += 1) { printf(formatString, iElem, arr[iElem]); } } int main(void) { // define the length of the vectors we're adding int nElems = 2048*2048; // define how many bytes is an integer (on this system) int nBytes = nElems * sizeof(int); // declare pointers to three arrays of integers in the host's memory space int *h_a; int *h_b; int *h_c; // preallocate the memory space for h_a, h_b, and h_c h_a = (int *)malloc(nBytes); h_b = (int *)malloc(nBytes); h_c = (int *)malloc(nBytes); // initialize h_a, h_b with random ints, initialize h_c with zeros. irand(h_a, nElems); irand(h_b, nElems); zeros(h_c, nElems); if (VERBOSE == 1) { // print the arrays to see what's going on printf("\n---- before ----\n"); printarr("h_a[%d] = %d\n", h_a, nElems); printf("\n"); printarr("h_b[%d] = %d\n", h_b, nElems); printf("\n"); printarr("h_c[%d] = %d\n", h_c, nElems); printf("\n"); } // declare pointers to three arrays of integers in the device's memory space // (the pointer itself lives in host memory, while its value points to an // address in the device memory?) 
int *d_a; int *d_b; int *d_c; // allocate nbytes memory on the device for each of the d_a, d_b, // and d_c variables cudaMalloc((void **)&d_a, nBytes); cudaMalloc((void **)&d_b, nBytes); cudaMalloc((void **)&d_c, nBytes); // copy nBytes of memory located at &h_a on the host to variable d_a // on the device (then do the same for &h_b, d_b) cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, nBytes, cudaMemcpyHostToDevice); int nBlocks; int nThreadsPerBlock = 512; if (nElems < nThreadsPerBlock) { // we need at least 1 block nBlocks = 1; } else { nBlocks = nElems / nThreadsPerBlock; } // call the integer add kernel with multiple blocks and threads, pass // it the values of d_a, d_b, as well as the (uninitialized) value // of d_c add<<<nBlocks,nThreadsPerBlock>>>(d_a, d_b, d_c, nElems); // copy the results from the device back to the host cudaMemcpy(h_c, d_c, nBytes, cudaMemcpyDeviceToHost); if (VERBOSE == 1) { // print the arrays to see what's going on printf("---- after ----\n"); printarr("h_a[%d] = %d\n", h_a, nElems); printf("\n"); printarr("h_b[%d] = %d\n", h_b, nElems); printf("\n"); printarr("h_c[%d] = %d\n", h_c, nElems); printf("\n"); } // free up the memory on the device that we cudaMalloc'ed earlier. cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); // free up the memory on the host that we malloc'ed earlier. free(h_a); free(h_b); free(h_c); return 0; }
e213ff6e99e3e6a22619f996af497c52ff69a4b7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/layers/spatial_transformer_layer.hpp" namespace caffe { template <typename Dtype> __host__ __device__ void forwardTransformAffine(Dtype& pdB,Dtype& phB,Dtype& pwB,int pd,int ph,int pw, const Dtype* tfmMatrix) { pdB = pd; phB = tfmMatrix[0] * ph + tfmMatrix[1] * pw + tfmMatrix[2]; pwB = tfmMatrix[3] * ph + tfmMatrix[4] * pw + tfmMatrix[5]; } template <typename Dtype> __host__ __device__ void dxdTAffine(Dtype* dddT,Dtype* dhdT,Dtype* dwdT,int pd, int ph, int pw) { dddT[0] = 0; dddT[1] = 0; dddT[2] = 0; dddT[3] = 0; dddT[4] = 0; dddT[5] = 0; dhdT[0] = ph; dhdT[1] = pw; dhdT[2] = 1; dhdT[3] = 0; dhdT[4] = 0; dhdT[5] = 0; dwdT[0] = 0; dwdT[1] = 0; dwdT[2] = 0; dwdT[3] = ph; dwdT[4] = pw; dwdT[5] = 1; } template <typename Dtype> __host__ __device__ void forwardTransformAffine3D(Dtype& pdB,Dtype& phB,Dtype& pwB,int pd,int ph,int pw, const Dtype* tfmMatrix) { pdB = tfmMatrix[0] * pd + tfmMatrix[1] * ph + tfmMatrix[2] * pw + tfmMatrix[3]; phB = tfmMatrix[4] * pd + tfmMatrix[5] * ph + tfmMatrix[6] * pw + tfmMatrix[7]; pwB = tfmMatrix[8] * pd + tfmMatrix[9] * ph + tfmMatrix[10] * pw + tfmMatrix[11]; } template <typename Dtype> __host__ __device__ void dxdTAffine3D(Dtype* dddT,Dtype* dhdT,Dtype* dwdT,int pd, int ph, int pw) { dddT[0] = pd; dddT[1] = ph; dddT[2] = pw; dddT[3] = 1; dddT[4] = 0; dddT[5] = 0; dddT[6] = 0; dddT[7] = 0; dddT[8] = 0; dddT[9] = 0; dddT[10]= 0; dddT[11]= 0; dhdT[0] = 0; dhdT[1] = 0; dhdT[2] = 0; dhdT[3] = 0; dhdT[4] = pd; dhdT[5] = ph; dhdT[6] = pw; dhdT[7] = 1; dhdT[8] = 0; dhdT[9] = 0; dhdT[10]= 0; dhdT[11]= 0; dwdT[0] = 0; dwdT[1] = 0; dwdT[2] = 0; dwdT[3] = 0; dwdT[4] = 0; dwdT[5] = 0; dwdT[6] = 0; dwdT[7] = 0; dwdT[8] = pd; dwdT[9] = ph; dwdT[10]= pw; dwdT[11]= 1; } template <typename 
Dtype> __host__ __device__ void forwardTransformInverseAffine(Dtype& pdB,Dtype& phB,Dtype& pwB,int pd,int ph,int pw, const Dtype* tfmMatrix) { Dtype det = tfmMatrix[0] * tfmMatrix[4] - tfmMatrix[1] * tfmMatrix[3]; Dtype inv0 = tfmMatrix[4] / det; Dtype inv1 = -tfmMatrix[1]/det; Dtype inv3 = -tfmMatrix[3] / det; Dtype inv4 = tfmMatrix[0] / det; Dtype inv2 = -(inv0 * tfmMatrix[2] + inv1 * tfmMatrix[5]); Dtype inv5 = -(inv3 * tfmMatrix[2] + inv4 * tfmMatrix[5]); pdB = pd; phB = inv0 * ph + inv1 * pw + inv2; pwB = inv3 * ph + inv4 * pw + inv5; } template <typename Dtype> __host__ __device__ void dxdTInverseAffine(Dtype* dddT,Dtype* dhdT,Dtype* dwdT,int pd, int ph, int pw, const Dtype* tfmMatrix) { const Dtype*p=tfmMatrix; Dtype det = (p[0] * p[4] - p[1] * p[3]); Dtype det2 = det*det; dddT[0]=0;dddT[1]=0;dddT[2]=0;dddT[3]=0;dddT[4]=0;dddT[5]=0; dhdT[0] = (-p[4] * (p[1] * p[5] - p[2] * p[4] + p[4] * ph - p[1] * pw)) / det2; dwdT[0] = (p[3] * (p[1] * p[5] - p[2] * p[4] + p[4] * ph - p[1] * pw)) / det2; dhdT[1] = (p[4] * (p[0] * p[5] - p[2] * p[3] + p[3] * ph - p[0] * pw)) / det2; dwdT[1] = (-p[3] * (p[0] * p[5] - p[2] * p[3] + p[3] * ph - p[0] * pw)) / det2; dhdT[2] = (p[1] * p[3] * p[4] - p[0] * p[4] * p[0] * p[4]) / det2; dwdT[2] = (p[3] * (p[0] * p[4] - p[1] * p[3])) / det2; dhdT[3] = (p[1] * (p[1] * p[5] - p[2] * p[4] + p[4] * ph - p[1] * pw)) / det2; dwdT[3] = (-p[0] * (p[1] * p[5] - p[2] * p[4] + p[4] * ph - p[1] * pw)) / det2; dhdT[4] = (-p[1] * (p[0] * p[5] - p[2] * p[3] + p[3] * ph - p[0] * pw)) / det2; dwdT[4] = (p[0] * (p[0] * p[5] - p[2] * p[3] + p[3] * ph - p[0] * pw)) / det2; dhdT[5] = (p[1] * (p[0] * p[4] - p[1] * p[3])) / det2; dwdT[5] = (-p[4] * p[0] * p[4] * p[0] + p[1] * p[3] * p[0]) / det2; } template <typename Dtype> void precomputeInverseAffine3D(Dtype* dxdT_coeffs_, const Dtype* tfmMatrix) { int LUT[9] = {9,2,1, 2,9,0, 1,0,9}; Dtype det = tfmMatrix[4*0+0] * tfmMatrix[4*1+1] * tfmMatrix[4*2+2] -tfmMatrix[4*0+0] * tfmMatrix[4*1+2] * tfmMatrix[4*2+1] 
+tfmMatrix[4*0+1] * tfmMatrix[4*1+2] * tfmMatrix[4*2+0] -tfmMatrix[4*0+1] * tfmMatrix[4*1+0] * tfmMatrix[4*2+2] +tfmMatrix[4*0+2] * tfmMatrix[4*1+0] * tfmMatrix[4*2+1] -tfmMatrix[4*0+2] * tfmMatrix[4*1+1] * tfmMatrix[4*2+0]; Dtype detUV[9]; detUV[3*0+0] = tfmMatrix[4*1+1]*tfmMatrix[4*2+2]-tfmMatrix[4*1+2]*tfmMatrix[4*2+1]; detUV[3*0+1] = tfmMatrix[4*1+0]*tfmMatrix[4*2+2]-tfmMatrix[4*1+2]*tfmMatrix[4*2+0]; detUV[3*0+2] = tfmMatrix[4*1+0]*tfmMatrix[4*2+1]-tfmMatrix[4*1+1]*tfmMatrix[4*2+0]; detUV[3*1+0] = tfmMatrix[4*0+1]*tfmMatrix[4*2+2]-tfmMatrix[4*0+2]*tfmMatrix[4*2+1]; detUV[3*1+1] = tfmMatrix[4*0+0]*tfmMatrix[4*2+2]-tfmMatrix[4*0+2]*tfmMatrix[4*2+0]; detUV[3*1+2] = tfmMatrix[4*0+0]*tfmMatrix[4*2+1]-tfmMatrix[4*0+1]*tfmMatrix[4*2+0]; detUV[3*2+0] = tfmMatrix[4*0+1]*tfmMatrix[4*1+2]-tfmMatrix[4*0+2]*tfmMatrix[4*1+1]; detUV[3*2+1] = tfmMatrix[4*0+0]*tfmMatrix[4*1+2]-tfmMatrix[4*0+2]*tfmMatrix[4*1+0]; detUV[3*2+2] = tfmMatrix[4*0+0]*tfmMatrix[4*1+1]-tfmMatrix[4*0+1]*tfmMatrix[4*1+0]; for (int pRIt = 0;pRIt<3;++pRIt) { for (int pUIt = 0;pUIt<3;++pUIt) {// X = d h w for (int pCIt = 0;pCIt<3;++pCIt) { for (int pVIt = 0;pVIt<3;++pVIt) { // pd ph and pw coeffs // dFnc(U)dT() = [ a/det - (c*detVU*(-1)^(V+U))/det^2 ] * [pd;ph;pw;1] int W = LUT[3*pRIt+pVIt]; int Q = LUT[3*pCIt+pUIt]; Dtype a = ((pUIt==pCIt||pVIt==pRIt)?0: tfmMatrix[4*W+Q]*((((pUIt-pCIt+3)%3)+((pVIt-pRIt+3)%3))%2?-1:1) ); Dtype c = detUV[3*pRIt+pCIt]*((pRIt+pCIt)%2?-1:1); dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+pVIt]= a/det - (c*detUV[3*pVIt+pUIt]*((pVIt+pUIt)%2?-1:1))/det/det;// coord coeffs for dSkewRotation dxdT_coeffs_[12*(4*pRIt+3)+4*pUIt+pVIt]=0; // coord coeffs for dTranslation } dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+3]=-tfmMatrix[4*0+3]*dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+0] -tfmMatrix[4*1+3]*dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+1] -tfmMatrix[4*2+3]*dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+2]; // scalar coeffs for dSkewRotation } dxdT_coeffs_[12*(4*pRIt+3)+4*pUIt+3]=-detUV[3*pRIt+pUIt]/det * 
((pRIt+pUIt)%2?-1:1); // scalar coeffs for dTranslation } } } template <typename Dtype> __host__ __device__ void forwardTransformInverseAffine3D(Dtype& pdB,Dtype& phB,Dtype& pwB,int pd,int ph,int pw, const Dtype* tfmMatrix) { Dtype det = tfmMatrix[4*0+0] * tfmMatrix[4*1+1] * tfmMatrix[4*2+2] -tfmMatrix[4*0+0] * tfmMatrix[4*1+2] * tfmMatrix[4*2+1] +tfmMatrix[4*0+1] * tfmMatrix[4*1+2] * tfmMatrix[4*2+0] -tfmMatrix[4*0+1] * tfmMatrix[4*1+0] * tfmMatrix[4*2+2] +tfmMatrix[4*0+2] * tfmMatrix[4*1+0] * tfmMatrix[4*2+1] -tfmMatrix[4*0+2] * tfmMatrix[4*1+1] * tfmMatrix[4*2+0]; Dtype detUV[9]; detUV[3*0+0] = tfmMatrix[4*1+1]*tfmMatrix[4*2+2]-tfmMatrix[4*1+2]*tfmMatrix[4*2+1]; detUV[3*0+1] = tfmMatrix[4*1+0]*tfmMatrix[4*2+2]-tfmMatrix[4*1+2]*tfmMatrix[4*2+0]; detUV[3*0+2] = tfmMatrix[4*1+0]*tfmMatrix[4*2+1]-tfmMatrix[4*1+1]*tfmMatrix[4*2+0]; detUV[3*1+0] = tfmMatrix[4*0+1]*tfmMatrix[4*2+2]-tfmMatrix[4*0+2]*tfmMatrix[4*2+1]; detUV[3*1+1] = tfmMatrix[4*0+0]*tfmMatrix[4*2+2]-tfmMatrix[4*0+2]*tfmMatrix[4*2+0]; detUV[3*1+2] = tfmMatrix[4*0+0]*tfmMatrix[4*2+1]-tfmMatrix[4*0+1]*tfmMatrix[4*2+0]; detUV[3*2+0] = tfmMatrix[4*0+1]*tfmMatrix[4*1+2]-tfmMatrix[4*0+2]*tfmMatrix[4*1+1]; detUV[3*2+1] = tfmMatrix[4*0+0]*tfmMatrix[4*1+2]-tfmMatrix[4*0+2]*tfmMatrix[4*1+0]; detUV[3*2+2] = tfmMatrix[4*0+0]*tfmMatrix[4*1+1]-tfmMatrix[4*0+1]*tfmMatrix[4*1+0]; pdB =( detUV[3*0+0] * (pd-tfmMatrix[4*0+3]) + detUV[3*1+0]*(ph-tfmMatrix[4*1+3]) + detUV[3*2+0]*(pw-tfmMatrix[4*2+3]) )/det; phB =( detUV[3*0+1] * (pd-tfmMatrix[4*0+3]) + detUV[3*1+1]*(ph-tfmMatrix[4*1+3]) + detUV[3*2+1]*(pw-tfmMatrix[4*2+3]) )/det; pwB =( detUV[3*0+2] * (pd-tfmMatrix[4*0+3]) + detUV[3*1+2]*(ph-tfmMatrix[4*1+3]) + detUV[3*2+2]*(pw-tfmMatrix[4*2+3]) )/det; } template <typename Dtype> __host__ __device__ void dxdTInverseAffine3D(const Dtype* dxdT_coeffs,Dtype* dddT,Dtype* dhdT,Dtype* dwdT,int pd, int ph, int pw) { for (int paramIt=0;paramIt<1;++paramIt) { dddT[paramIt]=dxdT_coeffs[12*paramIt+4*0+0]*pd + 
dxdT_coeffs[12*paramIt+4*0+1]*ph + dxdT_coeffs[12*paramIt+4*0+2]*pw + dxdT_coeffs[12*paramIt+4*0+3]; dhdT[paramIt]=dxdT_coeffs[12*paramIt+4*1+0]*pd + dxdT_coeffs[12*paramIt+4*1+1]*ph + dxdT_coeffs[12*paramIt+4*1+2]*pw + dxdT_coeffs[12*paramIt+4*1+3]; dwdT[paramIt]=dxdT_coeffs[12*paramIt+4*2+0]*pd + dxdT_coeffs[12*paramIt+4*2+1]*ph + dxdT_coeffs[12*paramIt+4*2+2]*pw + dxdT_coeffs[12*paramIt+4*2+3]; } } template <typename Dtype> __host__ __device__ void forwardInterpolate(Dtype* top_data, const Dtype* bottom_data, int index, int depth, int height, int width, Dtype pdB, Dtype phB, Dtype pwB) { int ipdB, ipwB, iphB; Dtype tln, bln, trn, brn, tlf, blf, trf, brf; pdB = max(0., min(static_cast<Dtype>(depth - 1), pdB)); phB = max(0., min(static_cast<Dtype>(height - 1), phB)); pwB = max(0., min(static_cast<Dtype>(width - 1), pwB)); ipwB = floor(pwB); iphB = floor(phB); ipdB = floor(pdB); tln = bottom_data[ipdB*width*height + iphB*width + ipwB]; bln = bottom_data[ipdB*width*height + (iphB + 1)*width + ipwB]; trn = bottom_data[ipdB*width*height + iphB*width + (ipwB + 1)]; brn = bottom_data[ipdB*width*height + (iphB + 1)*width + (ipwB + 1)]; tlf = bottom_data[(ipdB+1)*width*height + iphB*width + ipwB]; blf = bottom_data[(ipdB+1)*width*height + (iphB + 1)*width + ipwB]; trf = bottom_data[(ipdB+1)*width*height + iphB*width + (ipwB + 1)]; brf = bottom_data[(ipdB+1)*width*height + (iphB + 1)*width + (ipwB + 1)]; top_data[index] = tln*(1 - (pdB - ipdB)) * (1 - (phB - iphB)) * (1 - (pwB - ipwB)) + bln*(1 - (pdB - ipdB)) *( (phB - iphB)) * (1 - (pwB - ipwB)) + trn*(1 - (pdB - ipdB)) *(1 - (phB - iphB)) * ((pwB - ipwB)) + brn*(1 - (pdB - ipdB)) *((phB - iphB)) * ((pwB - ipwB)) + tlf*((pdB - ipdB)) *(1 - (phB - iphB)) * (1 - (pwB - ipwB)) + blf*((pdB - ipdB)) *( (phB - iphB)) * (1 - (pwB - ipwB)) + trf*((pdB - ipdB)) *(1 - (phB - iphB)) * ((pwB - ipwB)) + brf*((pdB - ipdB)) *((phB - iphB)) * ((pwB - ipwB)); } template <typename Dtype> __global__ void AffineForward(const int nthreads, 
const Dtype* bottom_data, const int num, const int channels, const int depth, const int height, const int width, const Dtype* tfmMatrix, const int num_parameters, Dtype* top_data, const int grid_d, const int grid_h, const int grid_w, SpatialTransformerParameter_TransformType transform_type) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype pdB, pwB, phB; int pw = index % grid_w; int ph = (index / grid_w) % grid_h; int pd = (index / (grid_w * grid_h)) % grid_d; int c = (index / (grid_w * grid_h * grid_d)) % channels; int n = index / (grid_w * grid_h * grid_d * channels); bottom_data += (n * channels + c) * height * width * depth; tfmMatrix += n*num_parameters; switch (transform_type) { case SpatialTransformerParameter_TransformType_AFFINE: forwardTransformAffine(pdB, phB, pwB, pd, ph, pw, tfmMatrix); break; case SpatialTransformerParameter_TransformType_INVERSE_AFFINE: forwardTransformInverseAffine(pdB, phB, pwB, pd, ph, pw, tfmMatrix); break; case SpatialTransformerParameter_TransformType_AFFINE3D: forwardTransformAffine3D(pdB, phB, pwB, pd, ph, pw, tfmMatrix); break; case SpatialTransformerParameter_TransformType_INVERSE_AFFINE3D: forwardTransformInverseAffine3D(pdB, phB, pwB, pd, ph, pw, tfmMatrix); break; } // TODO genericize boundary handling - for now assume smooth boundary extension (extend boundary value outside the image) forwardInterpolate(top_data, bottom_data, index, depth, height, width, pdB, phB, pwB); } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { SpatialTransformerParameter spatial_transformer_param = this->layer_param_.spatial_transformer_param(); const Dtype* params; int firstDataBlob; if (spatial_transformer_param.const_params().size()) { params = constParamsBlob_.gpu_data(); firstDataBlob = 0; } else { params = bottom[0]->gpu_data(); firstDataBlob = 1; } for (int i = firstDataBlob; i < bottom.size(); ++i) { Blob<Dtype>* bottom_ = bottom[i]; 
Blob<Dtype>* top_ = top[i - firstDataBlob]; const Dtype* bottom_data = bottom_->gpu_data(); Dtype* top_data = top_->mutable_gpu_data(); // // This line causes an error on the second top if there are constant parameters // int tDepth; if (bottom_->num_axes()==4) { tDepth=1; } else { tDepth=bottom_->shape(-3); } const int depth = tDepth; const int width = bottom_->shape(-1); const int height = bottom_->shape(-2); int count = top_->count(); // NOLINT_NEXT_LINE(whitespace/operators) AffineForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, bottom_data, bottom_->shape(0), bottom_->shape(1), depth, height, width, params, num_parameters_, top_data, grid_d_, grid_h_, grid_w_, spatial_transformer_param.type()); CUDA_POST_KERNEL_CHECK; } } template <typename Dtype> __global__ void AffineBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int grid_d, const int grid_h, const int grid_w, const Dtype * tfmMatrix, Dtype* param_diff, const Dtype* buffer, const int num_parameters, const Dtype* bottom_data, Dtype* bottom_diff, const int depth, const int height, const int width, SpatialTransformerParameter_TransformType transform_type, bool propagate_down_data, bool propagate_down_param) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the grid index Dtype dddT[12]; // temporary storage for partial derivatives with respect to transform parameters Dtype dhdT[12]; // Hack currently set for largest possible number of parameters Dtype dwdT[12]; int pw = index % grid_w; int ph = (index / grid_w) % grid_h; int pd = (index / (grid_w * grid_h)) % grid_d; int c = (index / (grid_w * grid_h * grid_d)) % channels; int n = index / (grid_w * grid_h * grid_d * channels); Dtype pwB, phB, pdB, tln, bln, trn, brn, tlf, blf, trf, brf; int ipwB, iphB, ipdB; top_diff += (n * channels + c) * grid_h * grid_w * grid_d; bottom_diff += (n * channels + c) * height * width * depth; bottom_data += (n * channels + c) * height * width * depth; 
tfmMatrix += num_parameters*n; param_diff += num_parameters*n; switch (transform_type) { case SpatialTransformerParameter_TransformType_AFFINE3D: forwardTransformAffine3D(pdB,phB,pwB, pd, ph, pw, tfmMatrix); break; case SpatialTransformerParameter_TransformType_INVERSE_AFFINE3D: forwardTransformInverseAffine3D(pdB,phB,pwB, pd, ph, pw, tfmMatrix); break; case SpatialTransformerParameter_TransformType_AFFINE: forwardTransformAffine(pdB,phB,pwB, pd, ph, pw, tfmMatrix); break; case SpatialTransformerParameter_TransformType_INVERSE_AFFINE: forwardTransformInverseAffine(pdB,phB,pwB, pd, ph, pw, tfmMatrix); break; } // TODO genericize boundary handling - for now assume smooth boundary extension (extend boundary value outside the image) pdB = max(0., min(static_cast<Dtype>(depth - 1), pdB)); phB = max(0., min(static_cast<Dtype>(height - 1), phB)); pwB = max(0., min(static_cast<Dtype>(width - 1), pwB)); // This will be similar for other transformation (with same sampling kernel) ipwB = floor(pwB); iphB = floor(phB); ipdB = floor(pdB); if (propagate_down_data) { caffe_gpu_atomic_add(top_diff[ph*grid_w + pw] * (1-(pdB-ipdB)) * (1-(phB-iphB)) * (1 - (pwB-ipwB)), bottom_diff + ipdB *width*height + iphB *width + ipwB); caffe_gpu_atomic_add(top_diff[ph*grid_w + pw] * (1-(pdB-ipdB)) * (1-(phB-iphB)) * ( (pwB-ipwB)), bottom_diff + ipdB *width*height + iphB *width + (ipwB + 1)); caffe_gpu_atomic_add(top_diff[ph*grid_w + pw] * (1-(pdB-ipdB)) * ( (phB-iphB)) * (1 - (pwB-ipwB)), bottom_diff + ipdB *width*height + (iphB + 1)*width + ipwB); caffe_gpu_atomic_add(top_diff[ph*grid_w + pw] * (1-(pdB-ipdB)) * ( (phB-iphB)) * ( (pwB-ipwB)), bottom_diff + ipdB *width*height + (iphB + 1)*width + (ipwB + 1)); caffe_gpu_atomic_add(top_diff[ph*grid_w + pw] * ( (pdB-ipdB)) * (1-(phB-iphB)) * (1 - (pwB-ipwB)), bottom_diff + (ipdB + 1)*width*height + iphB *width + ipwB); caffe_gpu_atomic_add(top_diff[ph*grid_w + pw] * ( (pdB-ipdB)) * (1-(phB-iphB)) * ( (pwB-ipwB)), bottom_diff + (ipdB + 
1)*width*height + iphB *width + (ipwB + 1)); caffe_gpu_atomic_add(top_diff[ph*grid_w + pw] * ( (pdB-ipdB)) * ( (phB-iphB)) * (1 - (pwB-ipwB)), bottom_diff + (ipdB + 1)*width*height + (iphB + 1)*width + ipwB); caffe_gpu_atomic_add(top_diff[ph*grid_w + pw] * ( (pdB-ipdB)) * ( (phB-iphB)) * ( (pwB-ipwB)), bottom_diff + (ipdB + 1)*width*height + (iphB + 1)*width + (ipwB + 1)); } if (propagate_down_param) { tln = bottom_data[ipdB*height*width + iphB*width + ipwB]; bln = bottom_data[ipdB*height*width + (iphB + 1)*width + ipwB]; trn = bottom_data[ipdB*height*width + iphB*width + (ipwB + 1)]; brn = bottom_data[ipdB*height*width + (iphB + 1)*width + (ipwB + 1)]; tlf = bottom_data[(ipdB+1)*height*width + iphB*width + ipwB]; blf = bottom_data[(ipdB+1)*height*width + (iphB + 1)*width + ipwB]; trf = bottom_data[(ipdB+1)*height*width + iphB*width + (ipwB + 1)]; brf = bottom_data[(ipdB+1)*height*width + (iphB + 1)*width + (ipwB + 1)]; // This depends on the transformation function: switch (transform_type) { case SpatialTransformerParameter_TransformType_AFFINE3D: dxdTAffine3D(dddT, dhdT, dwdT, pd, ph, pw); break; case SpatialTransformerParameter_TransformType_INVERSE_AFFINE3D: dxdTInverseAffine3D(buffer+n*144, dddT, dhdT, dwdT, pd, ph, pw); break; case SpatialTransformerParameter_TransformType_AFFINE: dxdTAffine(dddT, dhdT, dwdT, pd, ph, pw); break; case SpatialTransformerParameter_TransformType_INVERSE_AFFINE: dxdTInverseAffine3D(buffer, dddT, dhdT, dwdT, pd, ph, pw); break; } // This will be similar for other transformations (except with all partial derivatives) for (int param_it = 0; param_it < num_parameters; param_it++){ caffe_gpu_atomic_add( dddT[param_it] * (tlf*(iphB - phB + 1)*(ipwB - pwB + 1) - tln*(iphB - phB + 1)*(ipwB - pwB + 1) - trf*(ipwB - pwB)*(iphB - phB + 1) + trn*(ipwB - pwB)*(iphB - phB + 1) - blf*(iphB - phB)*(ipwB - pwB + 1) + bln*(iphB - phB)*(ipwB - pwB + 1) + brf*(iphB - phB)*(ipwB - pwB) - brn*(iphB - phB)*(ipwB - pwB)) + dhdT[param_it] * (tlf*(ipdB - 
pdB)*(ipwB - pwB + 1) - tln*(ipdB - pdB + 1)*(ipwB - pwB + 1) - trf*(ipdB - pdB)*(ipwB - pwB) + trn*(ipwB - pwB)*(ipdB - pdB + 1) - blf*(ipdB - pdB)*(ipwB - pwB + 1) + bln*(ipdB - pdB + 1)*(ipwB - pwB + 1) + brf*(ipdB - pdB)*(ipwB - pwB) - brn*(ipwB - pwB)*(ipdB - pdB + 1)) + dwdT[param_it] * (tlf*(ipdB - pdB)*(iphB - phB + 1) - tln*(ipdB - pdB + 1)*(iphB - phB + 1) - trf*(ipdB - pdB)*(iphB - phB + 1) + trn*(ipdB - pdB + 1)*(iphB - phB + 1) - blf*(ipdB - pdB)*(iphB - phB) + bln*(iphB - phB)*(ipdB - pdB + 1) + brf*(ipdB - pdB)*(iphB - phB) - brn*(iphB - phB)*(ipdB - pdB + 1)) , param_diff+param_it); } } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { SpatialTransformerParameter spatial_transformer_param = this->layer_param_.spatial_transformer_param(); const Dtype* params_cpu; const Dtype* params; int firstDataBlob; Dtype* param_diff; bool hasConstParams; if (spatial_transformer_param.const_params().size()) { params = constParamsBlob_.gpu_data(); firstDataBlob = 0; param_diff = constParamsBlob_.mutable_gpu_diff(); caffe_gpu_set(constParamsBlob_.count(), Dtype(0.), param_diff); hasConstParams = true; } else { params = bottom[0]->gpu_data(); firstDataBlob = 1; param_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), Dtype(0.), param_diff); hasConstParams = false; } for (int i = firstDataBlob; i < bottom.size(); ++i) { if ((hasConstParams && !propagate_down[i]) || (!hasConstParams && !propagate_down[i] && !propagate_down[0])) { continue; } Blob<Dtype>* bottom_ = bottom[i]; Blob<Dtype>* top_ = top[i - firstDataBlob]; const Dtype* top_diff = top_->gpu_diff(); Dtype* bottom_diff = bottom_->mutable_gpu_diff(); const Dtype* bottom_data = bottom_->gpu_data(); caffe_gpu_set(bottom_->count(), Dtype(0.), bottom_diff); // This is different from the pooling example // since I am iterating over the resampling grid // 
instead of over the bottom pixels // the original was // const int count = bottom_->count(); const int count = top_->count(); //Precomputation const Dtype* buffer = 0; switch (spatial_transformer_param.type()) { case SpatialTransformerParameter_TransformType_INVERSE_AFFINE3D: if (hasConstParams) { params_cpu= constParamsBlob_.cpu_data(); } else { params_cpu= bottom[0]->cpu_data(); } for (int n = 0; n < bottom_->num(); ++n) { precomputeInverseAffine3D((buffer_[0]->mutable_cpu_diff())+144*n, params_cpu + n*num_parameters_) ; } buffer = buffer_[0]->gpu_data(); } int tDepth; if (bottom_->num_axes()==4) { tDepth=1; } else { tDepth=bottom_->shape(-3); } const int depth = tDepth; const int width = bottom_->shape(-1); const int height = bottom_->shape(-2); // NOLINT_NEXT_LINE(whitespace/operators) AffineBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, top_diff, top_->num(), top_->channels(), grid_d_, grid_h_, grid_w_, params, param_diff, buffer, num_parameters_, bottom_data, bottom_diff, depth, height, width, spatial_transformer_param.type(), propagate_down[i], propagate_down[0] && !hasConstParams); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer); } // namespace caffe
e213ff6e99e3e6a22619f996af497c52ff69a4b7.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/layers/spatial_transformer_layer.hpp" namespace caffe { template <typename Dtype> __host__ __device__ void forwardTransformAffine(Dtype& pdB,Dtype& phB,Dtype& pwB,int pd,int ph,int pw, const Dtype* tfmMatrix) { pdB = pd; phB = tfmMatrix[0] * ph + tfmMatrix[1] * pw + tfmMatrix[2]; pwB = tfmMatrix[3] * ph + tfmMatrix[4] * pw + tfmMatrix[5]; } template <typename Dtype> __host__ __device__ void dxdTAffine(Dtype* dddT,Dtype* dhdT,Dtype* dwdT,int pd, int ph, int pw) { dddT[0] = 0; dddT[1] = 0; dddT[2] = 0; dddT[3] = 0; dddT[4] = 0; dddT[5] = 0; dhdT[0] = ph; dhdT[1] = pw; dhdT[2] = 1; dhdT[3] = 0; dhdT[4] = 0; dhdT[5] = 0; dwdT[0] = 0; dwdT[1] = 0; dwdT[2] = 0; dwdT[3] = ph; dwdT[4] = pw; dwdT[5] = 1; } template <typename Dtype> __host__ __device__ void forwardTransformAffine3D(Dtype& pdB,Dtype& phB,Dtype& pwB,int pd,int ph,int pw, const Dtype* tfmMatrix) { pdB = tfmMatrix[0] * pd + tfmMatrix[1] * ph + tfmMatrix[2] * pw + tfmMatrix[3]; phB = tfmMatrix[4] * pd + tfmMatrix[5] * ph + tfmMatrix[6] * pw + tfmMatrix[7]; pwB = tfmMatrix[8] * pd + tfmMatrix[9] * ph + tfmMatrix[10] * pw + tfmMatrix[11]; } template <typename Dtype> __host__ __device__ void dxdTAffine3D(Dtype* dddT,Dtype* dhdT,Dtype* dwdT,int pd, int ph, int pw) { dddT[0] = pd; dddT[1] = ph; dddT[2] = pw; dddT[3] = 1; dddT[4] = 0; dddT[5] = 0; dddT[6] = 0; dddT[7] = 0; dddT[8] = 0; dddT[9] = 0; dddT[10]= 0; dddT[11]= 0; dhdT[0] = 0; dhdT[1] = 0; dhdT[2] = 0; dhdT[3] = 0; dhdT[4] = pd; dhdT[5] = ph; dhdT[6] = pw; dhdT[7] = 1; dhdT[8] = 0; dhdT[9] = 0; dhdT[10]= 0; dhdT[11]= 0; dwdT[0] = 0; dwdT[1] = 0; dwdT[2] = 0; dwdT[3] = 0; dwdT[4] = 0; dwdT[5] = 0; dwdT[6] = 0; dwdT[7] = 0; dwdT[8] = pd; dwdT[9] = ph; dwdT[10]= pw; dwdT[11]= 1; } template <typename Dtype> __host__ __device__ void forwardTransformInverseAffine(Dtype& pdB,Dtype& phB,Dtype& 
pwB,int pd,int ph,int pw, const Dtype* tfmMatrix) { Dtype det = tfmMatrix[0] * tfmMatrix[4] - tfmMatrix[1] * tfmMatrix[3]; Dtype inv0 = tfmMatrix[4] / det; Dtype inv1 = -tfmMatrix[1]/det; Dtype inv3 = -tfmMatrix[3] / det; Dtype inv4 = tfmMatrix[0] / det; Dtype inv2 = -(inv0 * tfmMatrix[2] + inv1 * tfmMatrix[5]); Dtype inv5 = -(inv3 * tfmMatrix[2] + inv4 * tfmMatrix[5]); pdB = pd; phB = inv0 * ph + inv1 * pw + inv2; pwB = inv3 * ph + inv4 * pw + inv5; } template <typename Dtype> __host__ __device__ void dxdTInverseAffine(Dtype* dddT,Dtype* dhdT,Dtype* dwdT,int pd, int ph, int pw, const Dtype* tfmMatrix) { const Dtype*p=tfmMatrix; Dtype det = (p[0] * p[4] - p[1] * p[3]); Dtype det2 = det*det; dddT[0]=0;dddT[1]=0;dddT[2]=0;dddT[3]=0;dddT[4]=0;dddT[5]=0; dhdT[0] = (-p[4] * (p[1] * p[5] - p[2] * p[4] + p[4] * ph - p[1] * pw)) / det2; dwdT[0] = (p[3] * (p[1] * p[5] - p[2] * p[4] + p[4] * ph - p[1] * pw)) / det2; dhdT[1] = (p[4] * (p[0] * p[5] - p[2] * p[3] + p[3] * ph - p[0] * pw)) / det2; dwdT[1] = (-p[3] * (p[0] * p[5] - p[2] * p[3] + p[3] * ph - p[0] * pw)) / det2; dhdT[2] = (p[1] * p[3] * p[4] - p[0] * p[4] * p[0] * p[4]) / det2; dwdT[2] = (p[3] * (p[0] * p[4] - p[1] * p[3])) / det2; dhdT[3] = (p[1] * (p[1] * p[5] - p[2] * p[4] + p[4] * ph - p[1] * pw)) / det2; dwdT[3] = (-p[0] * (p[1] * p[5] - p[2] * p[4] + p[4] * ph - p[1] * pw)) / det2; dhdT[4] = (-p[1] * (p[0] * p[5] - p[2] * p[3] + p[3] * ph - p[0] * pw)) / det2; dwdT[4] = (p[0] * (p[0] * p[5] - p[2] * p[3] + p[3] * ph - p[0] * pw)) / det2; dhdT[5] = (p[1] * (p[0] * p[4] - p[1] * p[3])) / det2; dwdT[5] = (-p[4] * p[0] * p[4] * p[0] + p[1] * p[3] * p[0]) / det2; } template <typename Dtype> void precomputeInverseAffine3D(Dtype* dxdT_coeffs_, const Dtype* tfmMatrix) { int LUT[9] = {9,2,1, 2,9,0, 1,0,9}; Dtype det = tfmMatrix[4*0+0] * tfmMatrix[4*1+1] * tfmMatrix[4*2+2] -tfmMatrix[4*0+0] * tfmMatrix[4*1+2] * tfmMatrix[4*2+1] +tfmMatrix[4*0+1] * tfmMatrix[4*1+2] * tfmMatrix[4*2+0] -tfmMatrix[4*0+1] * tfmMatrix[4*1+0] 
* tfmMatrix[4*2+2] +tfmMatrix[4*0+2] * tfmMatrix[4*1+0] * tfmMatrix[4*2+1] -tfmMatrix[4*0+2] * tfmMatrix[4*1+1] * tfmMatrix[4*2+0]; Dtype detUV[9]; detUV[3*0+0] = tfmMatrix[4*1+1]*tfmMatrix[4*2+2]-tfmMatrix[4*1+2]*tfmMatrix[4*2+1]; detUV[3*0+1] = tfmMatrix[4*1+0]*tfmMatrix[4*2+2]-tfmMatrix[4*1+2]*tfmMatrix[4*2+0]; detUV[3*0+2] = tfmMatrix[4*1+0]*tfmMatrix[4*2+1]-tfmMatrix[4*1+1]*tfmMatrix[4*2+0]; detUV[3*1+0] = tfmMatrix[4*0+1]*tfmMatrix[4*2+2]-tfmMatrix[4*0+2]*tfmMatrix[4*2+1]; detUV[3*1+1] = tfmMatrix[4*0+0]*tfmMatrix[4*2+2]-tfmMatrix[4*0+2]*tfmMatrix[4*2+0]; detUV[3*1+2] = tfmMatrix[4*0+0]*tfmMatrix[4*2+1]-tfmMatrix[4*0+1]*tfmMatrix[4*2+0]; detUV[3*2+0] = tfmMatrix[4*0+1]*tfmMatrix[4*1+2]-tfmMatrix[4*0+2]*tfmMatrix[4*1+1]; detUV[3*2+1] = tfmMatrix[4*0+0]*tfmMatrix[4*1+2]-tfmMatrix[4*0+2]*tfmMatrix[4*1+0]; detUV[3*2+2] = tfmMatrix[4*0+0]*tfmMatrix[4*1+1]-tfmMatrix[4*0+1]*tfmMatrix[4*1+0]; for (int pRIt = 0;pRIt<3;++pRIt) { for (int pUIt = 0;pUIt<3;++pUIt) {// X = d h w for (int pCIt = 0;pCIt<3;++pCIt) { for (int pVIt = 0;pVIt<3;++pVIt) { // pd ph and pw coeffs // dFnc(U)dT() = [ a/det - (c*detVU*(-1)^(V+U))/det^2 ] * [pd;ph;pw;1] int W = LUT[3*pRIt+pVIt]; int Q = LUT[3*pCIt+pUIt]; Dtype a = ((pUIt==pCIt||pVIt==pRIt)?0: tfmMatrix[4*W+Q]*((((pUIt-pCIt+3)%3)+((pVIt-pRIt+3)%3))%2?-1:1) ); Dtype c = detUV[3*pRIt+pCIt]*((pRIt+pCIt)%2?-1:1); dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+pVIt]= a/det - (c*detUV[3*pVIt+pUIt]*((pVIt+pUIt)%2?-1:1))/det/det;// coord coeffs for dSkewRotation dxdT_coeffs_[12*(4*pRIt+3)+4*pUIt+pVIt]=0; // coord coeffs for dTranslation } dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+3]=-tfmMatrix[4*0+3]*dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+0] -tfmMatrix[4*1+3]*dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+1] -tfmMatrix[4*2+3]*dxdT_coeffs_[12*(4*pRIt+pCIt)+4*pUIt+2]; // scalar coeffs for dSkewRotation } dxdT_coeffs_[12*(4*pRIt+3)+4*pUIt+3]=-detUV[3*pRIt+pUIt]/det * ((pRIt+pUIt)%2?-1:1); // scalar coeffs for dTranslation } } } template <typename Dtype> __host__ __device__ 
void forwardTransformInverseAffine3D(Dtype& pdB, Dtype& phB, Dtype& pwB,
    int pd, int ph, int pw, const Dtype* tfmMatrix) {
  // Maps a grid coordinate back through the inverse of the 3x4 row-major
  // transform [R | t]: x = R^{-1} * ([pd;ph;pw] - t), via minors over det.
  // NOTE(review): the off-diagonal minors are combined without the (-1)^(U+V)
  // cofactor signs of the adjugate; the companion precompute routine applies
  // those signs explicitly. Verify the intended parameter convention.
  Dtype det = tfmMatrix[4*0+0] * tfmMatrix[4*1+1] * tfmMatrix[4*2+2]
      -tfmMatrix[4*0+0] * tfmMatrix[4*1+2] * tfmMatrix[4*2+1]
      +tfmMatrix[4*0+1] * tfmMatrix[4*1+2] * tfmMatrix[4*2+0]
      -tfmMatrix[4*0+1] * tfmMatrix[4*1+0] * tfmMatrix[4*2+2]
      +tfmMatrix[4*0+2] * tfmMatrix[4*1+0] * tfmMatrix[4*2+1]
      -tfmMatrix[4*0+2] * tfmMatrix[4*1+1] * tfmMatrix[4*2+0];
  // detUV[3*U+V] = 2x2 minor of the linear part with row U / col V removed.
  Dtype detUV[9];
  detUV[3*0+0] = tfmMatrix[4*1+1]*tfmMatrix[4*2+2]-tfmMatrix[4*1+2]*tfmMatrix[4*2+1];
  detUV[3*0+1] = tfmMatrix[4*1+0]*tfmMatrix[4*2+2]-tfmMatrix[4*1+2]*tfmMatrix[4*2+0];
  detUV[3*0+2] = tfmMatrix[4*1+0]*tfmMatrix[4*2+1]-tfmMatrix[4*1+1]*tfmMatrix[4*2+0];
  detUV[3*1+0] = tfmMatrix[4*0+1]*tfmMatrix[4*2+2]-tfmMatrix[4*0+2]*tfmMatrix[4*2+1];
  detUV[3*1+1] = tfmMatrix[4*0+0]*tfmMatrix[4*2+2]-tfmMatrix[4*0+2]*tfmMatrix[4*2+0];
  detUV[3*1+2] = tfmMatrix[4*0+0]*tfmMatrix[4*2+1]-tfmMatrix[4*0+1]*tfmMatrix[4*2+0];
  detUV[3*2+0] = tfmMatrix[4*0+1]*tfmMatrix[4*1+2]-tfmMatrix[4*0+2]*tfmMatrix[4*1+1];
  detUV[3*2+1] = tfmMatrix[4*0+0]*tfmMatrix[4*1+2]-tfmMatrix[4*0+2]*tfmMatrix[4*1+0];
  detUV[3*2+2] = tfmMatrix[4*0+0]*tfmMatrix[4*1+1]-tfmMatrix[4*0+1]*tfmMatrix[4*1+0];
  pdB =( detUV[3*0+0] * (pd-tfmMatrix[4*0+3]) + detUV[3*1+0]*(ph-tfmMatrix[4*1+3]) + detUV[3*2+0]*(pw-tfmMatrix[4*2+3]) )/det;
  phB =( detUV[3*0+1] * (pd-tfmMatrix[4*0+3]) + detUV[3*1+1]*(ph-tfmMatrix[4*1+3]) + detUV[3*2+1]*(pw-tfmMatrix[4*2+3]) )/det;
  pwB =( detUV[3*0+2] * (pd-tfmMatrix[4*0+3]) + detUV[3*1+2]*(ph-tfmMatrix[4*1+3]) + detUV[3*2+2]*(pw-tfmMatrix[4*2+3]) )/det;
}

// Evaluates the precomputed per-sample coefficient table (see
// precomputeInverseAffine3D: 12 parameters x 12 coefficients) at grid
// position (pd, ph, pw), yielding d(pdB)/dT, d(phB)/dT, d(pwB)/dT.
template <typename Dtype>
__host__ __device__ void dxdTInverseAffine3D(const Dtype* dxdT_coeffs,
    Dtype* dddT, Dtype* dhdT, Dtype* dwdT, int pd, int ph, int pw) {
  // BUGFIX: iterate over all 12 transform parameters. The original loop bound
  // was `paramIt < 1` (apparent debug leftover), which left dddT/dhdT/dwdT
  // [1..11] uninitialized while the backward kernel reads all of them and the
  // coefficient table holds 144 = 12x12 entries per sample.
  for (int paramIt = 0; paramIt < 12; ++paramIt) {
    dddT[paramIt]=dxdT_coeffs[12*paramIt+4*0+0]*pd + dxdT_coeffs[12*paramIt+4*0+1]*ph + dxdT_coeffs[12*paramIt+4*0+2]*pw + dxdT_coeffs[12*paramIt+4*0+3];
    dhdT[paramIt]=dxdT_coeffs[12*paramIt+4*1+0]*pd + dxdT_coeffs[12*paramIt+4*1+1]*ph + dxdT_coeffs[12*paramIt+4*1+2]*pw + dxdT_coeffs[12*paramIt+4*1+3];
    dwdT[paramIt]=dxdT_coeffs[12*paramIt+4*2+0]*pd + dxdT_coeffs[12*paramIt+4*2+1]*ph + dxdT_coeffs[12*paramIt+4*2+2]*pw + dxdT_coeffs[12*paramIt+4*2+3];
  }
}

// Trilinear interpolation of bottom_data at (pdB, phB, pwB), written to
// top_data[index]. Coordinates are clamped to the volume first.
template <typename Dtype>
__host__ __device__ void forwardInterpolate(Dtype* top_data, const Dtype* bottom_data,
    int index, int depth, int height, int width, Dtype pdB, Dtype phB, Dtype pwB) {
  int ipdB, ipwB, iphB;
  Dtype tln, bln, trn, brn, tlf, blf, trf, brf;
  pdB = max(0., min(static_cast<Dtype>(depth - 1), pdB));
  phB = max(0., min(static_cast<Dtype>(height - 1), phB));
  pwB = max(0., min(static_cast<Dtype>(width - 1), pwB));
  ipwB = floor(pwB);
  iphB = floor(phB);
  ipdB = floor(pdB);
  // BUGFIX: clamp the "+1" neighbour indices so a coordinate that lands
  // exactly on the last voxel (or depth==1 data) does not read one element
  // past the volume. Whenever the clamp takes effect the corresponding
  // fractional weight is exactly zero, so the interpolated value is unchanged.
  int ipd1 = min(ipdB + 1, depth - 1);
  int iph1 = min(iphB + 1, height - 1);
  int ipw1 = min(ipwB + 1, width - 1);
  tln = bottom_data[ipdB*width*height + iphB*width + ipwB];
  bln = bottom_data[ipdB*width*height + iph1*width + ipwB];
  trn = bottom_data[ipdB*width*height + iphB*width + ipw1];
  brn = bottom_data[ipdB*width*height + iph1*width + ipw1];
  tlf = bottom_data[ipd1*width*height + iphB*width + ipwB];
  blf = bottom_data[ipd1*width*height + iph1*width + ipwB];
  trf = bottom_data[ipd1*width*height + iphB*width + ipw1];
  brf = bottom_data[ipd1*width*height + iph1*width + ipw1];
  top_data[index] = tln*(1 - (pdB - ipdB)) * (1 - (phB - iphB)) * (1 - (pwB - ipwB))
      + bln*(1 - (pdB - ipdB)) *( (phB - iphB)) * (1 - (pwB - ipwB))
      + trn*(1 - (pdB - ipdB)) *(1 - (phB - iphB)) * ((pwB - ipwB))
      + brn*(1 - (pdB - ipdB)) *((phB - iphB)) * ((pwB - ipwB))
      + tlf*((pdB - ipdB)) *(1 - (phB - iphB)) * (1 - (pwB - ipwB))
      + blf*((pdB - ipdB)) *( (phB - iphB)) * (1 - (pwB - ipwB))
      + trf*((pdB - ipdB)) *(1 - (phB - iphB)) * ((pwB - ipwB))
      + brf*((pdB - ipdB)) *((phB - iphB)) * ((pwB - ipwB));
}

// (Head of the AffineForward kernel; its signature continues on the next line.)
template <typename Dtype>
__global__ void AffineForward(const int nthreads, const Dtype* bottom_data,
    const int num, const int channels, const int depth, const int height, const
int width, const Dtype* tfmMatrix, const int num_parameters, Dtype* top_data,
    const int grid_d, const int grid_h, const int grid_w,
    SpatialTransformerParameter_TransformType transform_type) {
  // (Body of AffineForward; the signature begins on the previous line.)
  // One thread per output element: decode (n, c, pd, ph, pw) from the flat
  // index, map the grid position through the selected transform, and sample
  // the input volume trilinearly.
  CUDA_KERNEL_LOOP(index, nthreads) {
    Dtype pdB, pwB, phB;
    int pw = index % grid_w;
    int ph = (index / grid_w) % grid_h;
    int pd = (index / (grid_w * grid_h)) % grid_d;
    int c = (index / (grid_w * grid_h * grid_d)) % channels;
    int n = index / (grid_w * grid_h * grid_d * channels);
    // Advance to this (n, c) slice of the input and this sample's parameters.
    bottom_data += (n * channels + c) * height * width * depth;
    tfmMatrix += n*num_parameters;
    switch (transform_type) {
    case SpatialTransformerParameter_TransformType_AFFINE:
      forwardTransformAffine(pdB, phB, pwB, pd, ph, pw, tfmMatrix); break;
    case SpatialTransformerParameter_TransformType_INVERSE_AFFINE:
      forwardTransformInverseAffine(pdB, phB, pwB, pd, ph, pw, tfmMatrix); break;
    case SpatialTransformerParameter_TransformType_AFFINE3D:
      forwardTransformAffine3D(pdB, phB, pwB, pd, ph, pw, tfmMatrix); break;
    case SpatialTransformerParameter_TransformType_INVERSE_AFFINE3D:
      forwardTransformInverseAffine3D(pdB, phB, pwB, pd, ph, pw, tfmMatrix); break;
    }
    // TODO genericize boundary handling - for now assume smooth boundary
    // extension (extend boundary value outside the image)
    forwardInterpolate(top_data, bottom_data, index, depth, height, width, pdB, phB, pwB);
  }
}

// Forward pass: resamples each data bottom blob onto the output grid using
// either the constant transform parameters or the parameters supplied in
// bottom[0].
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  SpatialTransformerParameter spatial_transformer_param = this->layer_param_.spatial_transformer_param();
  const Dtype* params;
  int firstDataBlob;
  // With constant parameters every bottom is data; otherwise bottom[0] holds
  // the per-sample transform parameters and data starts at bottom[1].
  if (spatial_transformer_param.const_params().size()) {
    params = constParamsBlob_.gpu_data();
    firstDataBlob = 0;
  } else {
    params = bottom[0]->gpu_data();
    firstDataBlob = 1;
  }
  for (int i = firstDataBlob; i < bottom.size(); ++i) {
    Blob<Dtype>* bottom_ = bottom[i];
    Blob<Dtype>* top_ = top[i - firstDataBlob];
    const Dtype* bottom_data = bottom_->gpu_data();
    // NOTE(review): this call was reported to error on the second top blob
    // when constant parameters are used — unverified from here.
    Dtype* top_data = top_->mutable_gpu_data();
    // 4-axis blobs are 2-D images (depth 1); 5-axis blobs carry depth at axis -3.
    int tDepth;
    if (bottom_->num_axes()==4) { tDepth=1; } else { tDepth=bottom_->shape(-3); }
    const int depth = tDepth;
    const int width = bottom_->shape(-1);
    const int height = bottom_->shape(-2);
    int count = top_->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    AffineForward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >(
        count, bottom_data, bottom_->shape(0), bottom_->shape(1), depth, height,
        width, params, num_parameters_, top_data, grid_d_, grid_h_, grid_w_,
        spatial_transformer_param.type());
    CUDA_POST_KERNEL_CHECK;
  }
}

// Backward kernel: one thread per output-grid element; scatters the upstream
// gradient into bottom_diff (data gradient) and param_diff (transform
// gradient) via atomics. `buffer` holds precomputed dx/dT coefficient tables
// for the inverse-affine-3D transform (144 entries per sample).
template <typename Dtype>
__global__ void AffineBackward(const int nthreads, const Dtype* top_diff,
    const int num, const int channels, const int grid_d, const int grid_h,
    const int grid_w, const Dtype * tfmMatrix, Dtype* param_diff,
    const Dtype* buffer, const int num_parameters, const Dtype* bottom_data,
    Dtype* bottom_diff, const int depth, const int height, const int width,
    SpatialTransformerParameter_TransformType transform_type,
    bool propagate_down_data, bool propagate_down_param) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the grid index
    Dtype dddT[12];  // temporary storage for partial derivatives with respect to transform parameters
    Dtype dhdT[12];  // Hack currently set for largest possible number of parameters
    Dtype dwdT[12];
    int pw = index % grid_w;
    int ph = (index / grid_w) % grid_h;
    int pd = (index / (grid_w * grid_h)) % grid_d;
    int c = (index / (grid_w * grid_h * grid_d)) % channels;
    int n = index / (grid_w * grid_h * grid_d * channels);
    Dtype pwB, phB, pdB, tln, bln, trn, brn, tlf, blf, trf, brf;
    int ipwB, iphB, ipdB;
    // Advance all pointers to this (n, c) slice / this sample's parameters.
    top_diff += (n * channels + c) * grid_h * grid_w * grid_d;
    bottom_diff += (n * channels + c) * height * width * depth;
    bottom_data += (n * channels + c) * height * width * depth;
    tfmMatrix += num_parameters*n;
    param_diff += num_parameters*n;
    switch (transform_type) { case
SpatialTransformerParameter_TransformType_AFFINE3D:
      forwardTransformAffine3D(pdB,phB,pwB, pd, ph, pw, tfmMatrix); break;
    case SpatialTransformerParameter_TransformType_INVERSE_AFFINE3D:
      forwardTransformInverseAffine3D(pdB,phB,pwB, pd, ph, pw, tfmMatrix); break;
    case SpatialTransformerParameter_TransformType_AFFINE:
      forwardTransformAffine(pdB,phB,pwB, pd, ph, pw, tfmMatrix); break;
    case SpatialTransformerParameter_TransformType_INVERSE_AFFINE:
      forwardTransformInverseAffine(pdB,phB,pwB, pd, ph, pw, tfmMatrix); break;
    }
    // TODO genericize boundary handling - for now assume smooth boundary
    // extension (extend boundary value outside the image)
    pdB = max(0., min(static_cast<Dtype>(depth - 1), pdB));
    phB = max(0., min(static_cast<Dtype>(height - 1), phB));
    pwB = max(0., min(static_cast<Dtype>(width - 1), pwB));
    // This will be similar for other transformation (with same sampling kernel)
    ipwB = floor(pwB);
    iphB = floor(phB);
    ipdB = floor(pdB);
    // BUGFIX: the upstream gradient for this output voxel is at the full 3-D
    // offset pd*grid_h*grid_w + ph*grid_w + pw. The original read
    // top_diff[ph*grid_w + pw], which is only correct when grid_d == 1.
    Dtype tdiff = top_diff[pd*grid_h*grid_w + ph*grid_w + pw];
    // BUGFIX: clamp the "+1" neighbour indices so boundary samples do not
    // access one element past the volume (mirrors forwardInterpolate); the
    // associated interpolation weight is exactly zero whenever the clamp
    // takes effect, so accumulated values are unchanged.
    int ipd1 = min(ipdB + 1, depth - 1);
    int iph1 = min(iphB + 1, height - 1);
    int ipw1 = min(ipwB + 1, width - 1);
    if (propagate_down_data) {
      // Scatter tdiff into the eight neighbouring input voxels with the
      // trilinear interpolation weights (atomic: many grid points may map
      // to the same input voxel).
      caffe_gpu_atomic_add(tdiff * (1-(pdB-ipdB)) * (1-(phB-iphB)) * (1 - (pwB-ipwB)), bottom_diff + ipdB*width*height + iphB*width + ipwB);
      caffe_gpu_atomic_add(tdiff * (1-(pdB-ipdB)) * (1-(phB-iphB)) * (    (pwB-ipwB)), bottom_diff + ipdB*width*height + iphB*width + ipw1);
      caffe_gpu_atomic_add(tdiff * (1-(pdB-ipdB)) * (   (phB-iphB)) * (1 - (pwB-ipwB)), bottom_diff + ipdB*width*height + iph1*width + ipwB);
      caffe_gpu_atomic_add(tdiff * (1-(pdB-ipdB)) * (   (phB-iphB)) * (    (pwB-ipwB)), bottom_diff + ipdB*width*height + iph1*width + ipw1);
      caffe_gpu_atomic_add(tdiff * (   (pdB-ipdB)) * (1-(phB-iphB)) * (1 - (pwB-ipwB)), bottom_diff + ipd1*width*height + iphB*width + ipwB);
      caffe_gpu_atomic_add(tdiff * (   (pdB-ipdB)) * (1-(phB-iphB)) * (    (pwB-ipwB)), bottom_diff + ipd1*width*height + iphB*width + ipw1);
      caffe_gpu_atomic_add(tdiff * (   (pdB-ipdB)) * (   (phB-iphB)) * (1 - (pwB-ipwB)), bottom_diff + ipd1*width*height + iph1*width + ipwB);
      caffe_gpu_atomic_add(tdiff * (   (pdB-ipdB)) * (   (phB-iphB)) * (    (pwB-ipwB)), bottom_diff + ipd1*width*height + iph1*width + ipw1);
    }
    if (propagate_down_param) {
      // The eight neighbouring input values around the sample location.
      tln = bottom_data[ipdB*height*width + iphB*width + ipwB];
      bln = bottom_data[ipdB*height*width + iph1*width + ipwB];
      trn = bottom_data[ipdB*height*width + iphB*width + ipw1];
      brn = bottom_data[ipdB*height*width + iph1*width + ipw1];
      tlf = bottom_data[ipd1*height*width + iphB*width + ipwB];
      blf = bottom_data[ipd1*height*width + iph1*width + ipwB];
      trf = bottom_data[ipd1*height*width + iphB*width + ipw1];
      brf = bottom_data[ipd1*height*width + iph1*width + ipw1];
      // This depends on the transformation function:
      switch (transform_type) {
      case SpatialTransformerParameter_TransformType_AFFINE3D:
        dxdTAffine3D(dddT, dhdT, dwdT, pd, ph, pw); break;
      case SpatialTransformerParameter_TransformType_INVERSE_AFFINE3D:
        dxdTInverseAffine3D(buffer+n*144, dddT, dhdT, dwdT, pd, ph, pw); break;
      case SpatialTransformerParameter_TransformType_AFFINE:
        dxdTAffine(dddT, dhdT, dwdT, pd, ph, pw); break;
      // BUGFIX: the 2-D inverse-affine must use dxdTInverseAffine, which
      // derives the partials directly from tfmMatrix. The original
      // dispatched to dxdTInverseAffine3D(buffer, ...), but `buffer` is
      // only populated for INVERSE_AFFINE3D and is NULL here.
      case SpatialTransformerParameter_TransformType_INVERSE_AFFINE:
        dxdTInverseAffine(dddT, dhdT, dwdT, pd, ph, pw, tfmMatrix); break;
      }
      // This will be similar for other transformations (except with all
      // partial derivatives). The three bracketed sums are d(output)/d(pdB),
      // d(output)/d(phB), d(output)/d(pwB) of the trilinear interpolant.
      for (int param_it = 0; param_it < num_parameters; param_it++){
        // BUGFIX: chain rule — dL/dT = dL/d(output) * d(output)/dT, so the
        // Jacobian contribution must be scaled by the upstream gradient
        // tdiff; the original accumulated the raw Jacobian only.
        caffe_gpu_atomic_add( tdiff * (
            dddT[param_it] * (tlf*(iphB - phB + 1)*(ipwB - pwB + 1) - tln*(iphB - phB + 1)*(ipwB - pwB + 1) - trf*(ipwB - pwB)*(iphB - phB + 1) + trn*(ipwB - pwB)*(iphB - phB + 1) - blf*(iphB - phB)*(ipwB - pwB + 1) + bln*(iphB - phB)*(ipwB - pwB + 1) + brf*(iphB - phB)*(ipwB - pwB) - brn*(iphB - phB)*(ipwB - pwB))
            + dhdT[param_it] * (tlf*(ipdB - pdB)*(ipwB - pwB + 1) - tln*(ipdB - pdB + 1)*(ipwB - pwB + 1) - trf*(ipdB - pdB)*(ipwB - pwB) + trn*(ipwB - pwB)*(ipdB - pdB + 1) - blf*(ipdB - pdB)*(ipwB - pwB + 1) + bln*(ipdB - pdB + 1)*(ipwB - pwB + 1) + brf*(ipdB - pdB)*(ipwB - pwB) - brn*(ipwB - pwB)*(ipdB - pdB + 1))
            + dwdT[param_it] * (tlf*(ipdB - pdB)*(iphB - phB + 1) - tln*(ipdB - pdB + 1)*(iphB - phB + 1) - trf*(ipdB - pdB)*(iphB - phB + 1) + trn*(ipdB - pdB + 1)*(iphB - phB + 1) - blf*(ipdB - pdB)*(iphB - phB) + bln*(iphB - phB)*(ipdB - pdB + 1) + brf*(ipdB - pdB)*(iphB - phB) - brn*(iphB - phB)*(ipdB - pdB + 1))
            ), param_diff+param_it);
      }
    }
  }
}

// Backward pass: accumulates gradients w.r.t. the data bottoms and (unless
// the parameters are constant) w.r.t. the transform parameters in bottom[0].
template <typename Dtype>
void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  SpatialTransformerParameter spatial_transformer_param = this->layer_param_.spatial_transformer_param();
  const Dtype* params_cpu;
  const Dtype* params;
  int firstDataBlob;
  Dtype* param_diff;
  bool hasConstParams;
  if (spatial_transformer_param.const_params().size()) {
    params = constParamsBlob_.gpu_data();
    firstDataBlob = 0;
    param_diff = constParamsBlob_.mutable_gpu_diff();
    caffe_gpu_set(constParamsBlob_.count(), Dtype(0.), param_diff);
    hasConstParams = true;
  } else {
    params = bottom[0]->gpu_data();
    firstDataBlob = 1;
    param_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_set(bottom[0]->count(), Dtype(0.), param_diff);
    hasConstParams = false;
  }
  for (int i = firstDataBlob; i < bottom.size(); ++i) {
    // Skip blobs that need neither a data gradient nor (when parameters are
    // learned) a parameter gradient.
    if ((hasConstParams && !propagate_down[i]) || (!hasConstParams && !propagate_down[i] && !propagate_down[0])) { continue; }
    Blob<Dtype>* bottom_ = bottom[i];
    Blob<Dtype>* top_ = top[i - firstDataBlob];
    const Dtype* top_diff = top_->gpu_diff();
    Dtype* bottom_diff = bottom_->mutable_gpu_diff();
    const Dtype* bottom_data = bottom_->gpu_data();
    caffe_gpu_set(bottom_->count(), Dtype(0.), bottom_diff);
    // This is different from the pooling example: we iterate over the
    // resampling grid (top) instead of over the bottom pixels, so the thread
    // count comes from the top blob (the original was bottom_->count()).
    const int count =
top_->count(); //Precomputation const Dtype* buffer = 0; switch (spatial_transformer_param.type()) { case SpatialTransformerParameter_TransformType_INVERSE_AFFINE3D: if (hasConstParams) { params_cpu= constParamsBlob_.cpu_data(); } else { params_cpu= bottom[0]->cpu_data(); } for (int n = 0; n < bottom_->num(); ++n) { precomputeInverseAffine3D((buffer_[0]->mutable_cpu_diff())+144*n, params_cpu + n*num_parameters_) ; } buffer = buffer_[0]->gpu_data(); } int tDepth; if (bottom_->num_axes()==4) { tDepth=1; } else { tDepth=bottom_->shape(-3); } const int depth = tDepth; const int width = bottom_->shape(-1); const int height = bottom_->shape(-2); // NOLINT_NEXT_LINE(whitespace/operators) AffineBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, top_diff, top_->num(), top_->channels(), grid_d_, grid_h_, grid_w_, params, param_diff, buffer, num_parameters_, bottom_data, bottom_diff, depth, height, width, spatial_transformer_param.type(), propagate_down[i], propagate_down[0] && !hasConstParams); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer); } // namespace caffe